Collected by Alex Krizhevsky, Vinod Nair, and Geoffrey Hinton.State of the art accuracy is at 96.808 as of 2022
Research on the dataset:
Cifar 100 contains 60 thousand tiny dataset images. They are colored which means they have 3 channels and each channel has 32 by 32 pixels. The classes are completely mutually exclusive. There is no overlap between automobiles and trucks. "Automobile" includes sedans, SUVs, things of that sort. "Truck" includes only big trucks. Neither includes pickup trucks.
| SuperClass | Classes |
|---|---|
| aquatic mammals | beaver, dolphin, otter, seal, whale |
| fish | aquarium fish, flatfish, ray, shark, trout |
| flowers | orchids, poppies, roses, sunflowers, tulips |
| food containers | bottles, bowls, cans, cups, plates |
| fruit and vegetables | apples, mushrooms, oranges, pears, sweet peppers |
| household electrical devices | clock, computer keyboard, lamp, telephone, television |
| household furniture | bed, chair, couch, table, wardrobe |
| insects | bee, beetle, butterfly, caterpillar, cockroach |
| large carnivores | bear, leopard, lion, tiger, wolf |
| large man-made outdoor things | bridge, castle, house, road, skyscraper |
| large natural outdoor scenes | cloud, forest, mountain, plain, sea |
| large omnivores and herbivores | camel, cattle, chimpanzee, elephant, kangaroo |
| medium-sized mammals | fox, porcupine, possum, raccoon, skunk |
| non-insect invertebrates | crab, lobster, snail, spider, worm |
| people | baby, boy, girl, man, woman |
| reptiles | crocodile, dinosaur, lizard, snake, turtle |
| small mammals | hamster, mouse, rabbit, shrew, squirrel |
| trees | maple, oak, palm, pine, willow |
| vehicles_1 | bicycle, bus, motorcycle, pickup truck, train |
| vehicles_2 | lawn-mower, rocket, streetcar, tank, tractor |
# pip install keras-tuner --upgrade
import tensorflow
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from keras.datasets import cifar100
from tensorflow.keras.layers import Conv2D,Flatten,MaxPooling2D,Dense,Flatten,Dropout,BatchNormalization,LeakyReLU
from numpy import mean
from numpy import std
from matplotlib import pyplot
from sklearn.model_selection import KFold
from keras.utils import to_categorical
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping,ModelCheckpoint,ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
from keras import regularizers
from sklearn.model_selection import train_test_split
import keras_cv
from numpy import mean
from numpy import std
from matplotlib import pyplot
from keras.optimizers import SGD
import gc
import tensorflow as tf
from tensorflow import keras
from sklearn.metrics import precision_score , recall_score , f1_score , accuracy_score , confusion_matrix
from keras import layers
from keras.initializers import RandomNormal,Constant
import visualkeras
(X_train, y_train), (X_test, y_test) = cifar100.load_data(label_mode='fine')
assert X_train.shape == (50000, 32, 32, 3)
assert X_test.shape == (10000, 32, 32, 3)
assert y_train.shape == (50000, 1)
assert y_test.shape == (10000, 1)
# split into train and val sets using train_test_split
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42, stratify=y_train)
print("X_train shape:", X_train.shape, "y_train shape:", y_train.shape)
print("X_test shape:", X_test.shape, "y_test shape:", y_test.shape)
# print unique classes count
print("Unique classes count:", len(np.unique(y_train)))
# print unique classes
y_train_df = pd.DataFrame(y_train)
print(y_train_df[0].value_counts())
y_test_labels = y_test
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
y_val = to_categorical(y_val)
print('after',y_train.shape,y_test.shape)
X_train shape: (40000, 32, 32, 3) y_train shape: (40000, 1)
X_test shape: (10000, 32, 32, 3) y_test shape: (10000, 1)
Unique classes count: 100
4 400
2 400
84 400
15 400
37 400
...
62 400
41 400
12 400
0 400
93 400
Name: 0, Length: 100, dtype: int64
after (40000, 100) (10000, 100)
print(y_train_df[0].value_counts())
4 400
2 400
84 400
15 400
37 400
...
62 400
41 400
12 400
0 400
93 400
Name: 0, Length: 100, dtype: int64
# show some images of the dataset
fig, axes = plt.subplots(3, 3, figsize=(10, 10))
for i, ax in enumerate(axes.flat):
ax.imshow(X_train[i])
ax.set(xticks=[], yticks=[])
plt.show()
# Functions for plotting
import plotly
from plotly import tools
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.graph_objs as go
from plotly.subplots import make_subplots
def create_trace(x,y,ylabel,color):
trace = go.Scatter(
x = x,y = y,
name=ylabel,
marker=dict(color=color),
mode = "markers+lines",
text=x
)
return trace
def plot_accuracy_and_loss(train_model):
hist = train_model.history
acc = hist['accuracy']
val_acc = hist['val_accuracy']
loss = hist['loss']
val_loss = hist['val_loss']
epochs = list(range(1,len(acc)+1))
trace_ta = create_trace(epochs,acc,"Training accuracy", "Green")
trace_va = create_trace(epochs,val_acc,"Validation accuracy", "Red")
trace_tl = create_trace(epochs,loss,"Training loss", "Blue")
trace_vl = create_trace(epochs,val_loss,"Validation loss", "Magenta")
fig = make_subplots(rows=1,cols=2, subplot_titles=('Training and validation accuracy',
'Training and validation loss'))
fig.append_trace(trace_ta,1,1)
fig.append_trace(trace_va,1,1)
fig.append_trace(trace_tl,1,2)
fig.append_trace(trace_vl,1,2)
fig['layout']['xaxis'].update(title = 'Epoch')
fig['layout']['xaxis2'].update(title = 'Epoch')
fig['layout']['yaxis'].update(title = 'Accuracy', range=[0,1])
fig['layout']['yaxis2'].update(title = 'Loss', range=[0,1])
iplot(fig, filename=f'accuracy-loss_{train_model}')
CIFAR 100 seems to be alot harder for a simple CNN model to recognize

Image is different from a typical MNIST dataset as it is now coloured, below is a sneak peak of what goes on in the code below
def get_metrics(model,X_test = X_test):
predictions = np.argmax(model.predict(X_test), axis=-1)
precision = precision_score(y_test_labels, predictions, average='macro')
recall = recall_score(y_test_labels, predictions, average='macro')
f1 = f1_score(y_test_labels, predictions, average='macro')
return precision,recall,f1
epochs = 15
num_classes = 100
seed = np.random.seed(1)
cifar_model = Sequential()
cifar_model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=(32,32,3)))
cifar_model.add(Conv2D(64, (3, 3), activation='relu'))
cifar_model.add(Flatten())
cifar_model.add(Dense(128, activation='relu'))
cifar_model.add(Dense(num_classes, activation='softmax'))
cifar_model.compile(loss='categorical_crossentropy', optimizer=Adam(),metrics=['accuracy'])
cifar_model.summary()
h_callback = cifar_model.fit(X_train, y_train, epochs = 10,
validation_data=(X_val, y_val))
# Plot train vs test loss during training
plot_accuracy_and_loss(h_callback)
# function to get precision, recall and f1 score
test_loss , test_acc = cifar_model.evaluate(X_test, y_test)
precision,recall,f1 = get_metrics(cifar_model)
# Dataframe to keep track of all model scores
Model_scores = pd.DataFrame([['CNN baseline',test_acc,test_loss,precision,recall,f1]],columns=['Model','Accuracy','Loss','Precision','Recall','F1 Score'])
#Understand the filters in the model
#Let us pick the first hidden layer as the layer of interest.
layer = cifar_model.layers #Conv layers at 0,
filters, biases = cifar_model.layers[0].get_weights()
print(layer[0].name, filters.shape)
# plot filters
fig1=plt.figure(figsize=(8, 12))
columns = 8
rows = 8
n_filters = 32 ## the number of filters in our first layer
for i in range(1, n_filters + 1):
f = filters[:, :, :, i-1]
fig1 =plt.subplot(rows, columns, i)
fig1.set_xticks([]) #Turn off axis
fig1.set_yticks([])
plt.imshow(f[:, :, 0], cmap='gray') #Show only the filters from 0th channel (R)
#ix += 1
plt.show()
# plot feature maps after first convolutional layer
from keras.models import Model
layer = cifar_model.layers[0]
intermediate_model = Model(inputs=cifar_model.input, outputs=layer.output)
intermediate_output = intermediate_model.predict(X_test)
plt.figure(figsize=(8, 12))
columns = 8
rows = 8
n_filters = 32 ## the number of filters in our first layer
for i in range(1, n_filters + 1):
f = intermediate_output[0, :, :, i-1]
fig1 =plt.subplot(rows, columns, i)
fig1.set_xticks([]) #Turn off axis
fig1.set_yticks([])
plt.imshow(f)
gc.collect()
tf.keras.backend.clear_session()
del cifar_model
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 30, 30, 32) 896
conv2d_1 (Conv2D) (None, 28, 28, 64) 18496
flatten (Flatten) (None, 50176) 0
dense (Dense) (None, 128) 6422656
dense_1 (Dense) (None, 100) 12900
=================================================================
Total params: 6,454,948
Trainable params: 6,454,948
Non-trainable params: 0
_________________________________________________________________
Epoch 1/10
1250/1250 [==============================] - 14s 7ms/step - loss: 6.6217 - accuracy: 0.0164 - val_loss: 4.5867 - val_accuracy: 0.0258
Epoch 2/10
1250/1250 [==============================] - 9s 7ms/step - loss: 4.5194 - accuracy: 0.0302 - val_loss: 4.5842 - val_accuracy: 0.0207
Epoch 3/10
1250/1250 [==============================] - 9s 7ms/step - loss: 4.3390 - accuracy: 0.0643 - val_loss: 4.5635 - val_accuracy: 0.0365
Epoch 4/10
1250/1250 [==============================] - 9s 7ms/step - loss: 3.9535 - accuracy: 0.1319 - val_loss: 4.7098 - val_accuracy: 0.0487
Epoch 5/10
1250/1250 [==============================] - 8s 7ms/step - loss: 3.4158 - accuracy: 0.2338 - val_loss: 5.2386 - val_accuracy: 0.0511
Epoch 6/10
1250/1250 [==============================] - 8s 7ms/step - loss: 2.8462 - accuracy: 0.3507 - val_loss: 6.2830 - val_accuracy: 0.0598
Epoch 7/10
1250/1250 [==============================] - 8s 7ms/step - loss: 2.3773 - accuracy: 0.4504 - val_loss: 7.5123 - val_accuracy: 0.0554
Epoch 8/10
1250/1250 [==============================] - 8s 6ms/step - loss: 1.9910 - accuracy: 0.5365 - val_loss: 9.4144 - val_accuracy: 0.0553
Epoch 9/10
1250/1250 [==============================] - 8s 6ms/step - loss: 1.7002 - accuracy: 0.5990 - val_loss: 11.0640 - val_accuracy: 0.0549
Epoch 10/10
1250/1250 [==============================] - 8s 6ms/step - loss: 1.4842 - accuracy: 0.6536 - val_loss: 12.6997 - val_accuracy: 0.0574
313/313 [==============================] - 1s 4ms/step - loss: 12.4953 - accuracy: 0.0633 313/313 [==============================] - 1s 2ms/step conv2d (3, 3, 3, 32)
313/313 [==============================] - 0s 1ms/step
plt.imshow(X_train[12])
plt.show()
X_train = X_train/255
X_test = X_test/255
X_val = X_val/255
X_train_augmented = X_train.copy()
y_train_augmented = y_train.copy()
print('before augmentation',X_train.shape,y_train.shape)
# ImageDataGenerator rotation
datagen_rotate = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=90,
)
# ImageDataGenerator rotation
datagen_shear = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
shear_range=0.4
)
# ImageDataGenerator rotation
datagen_flip = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
horizontal_flip=True,
vertical_flip=True)
# ImageDataGenerator rotation
datagen_channel = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
channel_shift_range=0.5
)
datagen_rotate.fit(X_train_augmented)
X_train_augmented_rotate = datagen_rotate.flow(X_train_augmented,y_train_augmented, batch_size = X_train_augmented.shape[0], shuffle=False).next()
# change X_train_augmented to float32
X_train_augmented_rotate = X_train_augmented_rotate[0].astype('float32')
datagen_shear.fit(X_train_augmented)
X_train_augmented_shear = datagen_shear.flow(X_train_augmented,y_train_augmented, batch_size = X_train_augmented.shape[0], shuffle=False).next()
# change X_train_augmented to float32
X_train_augmented_shear = X_train_augmented_shear[0].astype('float32')
datagen_flip.fit(X_train_augmented)
X_train_augmented_flip = datagen_flip.flow(X_train_augmented,y_train_augmented, batch_size = X_train_augmented.shape[0], shuffle=False).next()
# change X_train_augmented to float32
X_train_augmented_flip = X_train_augmented_flip[0].astype('float32')
datagen_channel.fit(X_train_augmented)
X_train_augmented_channel = datagen_channel.flow(X_train_augmented,y_train_augmented, batch_size = X_train_augmented.shape[0], shuffle=False).next()
# change X_train_augmented to float32
X_train_augmented_channel = X_train_augmented_channel[0].astype('float32')
# concat the original and augmented data
X_train = np.concatenate((X_train,X_train_augmented_rotate),axis=0)
y_train = np.concatenate((y_train,y_train_augmented),axis=0)
# concat the original and augmented data
X_train = np.concatenate((X_train,X_train_augmented_shear),axis=0)
y_train = np.concatenate((y_train,y_train_augmented),axis=0)
# concat the original and augmented data
X_train = np.concatenate((X_train,X_train_augmented_flip),axis=0)
y_train = np.concatenate((y_train,y_train_augmented),axis=0)
# concat the original and augmented data
X_train = np.concatenate((X_train,X_train_augmented_channel),axis=0)
y_train = np.concatenate((y_train,y_train_augmented),axis=0)
plt.imshow(X_train[40012])
plt.show()
# multiply by 2 each time it runs
datagen_mult = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
horizontal_flip=True,
vertical_flip=True,
rotation_range=90
)
datagen_mult.fit(X_train)
iterator = datagen_mult.flow(X_train,y_train, batch_size = X_train.shape[0], shuffle=False)
X_train_augmented1 = iterator.next()[0]
y_train_augmented = iterator.next()[1]
# change X_train_augmented to float32
X_train_augmented1 = X_train_augmented1.astype('float32')
X_train = np.concatenate((X_train,X_train_augmented1),axis=0)
y_train = np.concatenate((y_train,y_train),axis=0)
before augmentation (40000, 32, 32, 3) (40000, 100)
print('after augmentation',X_train.shape,y_train.shape)
after augmentation (400000, 32, 32, 3) (400000, 100)
# baseline cnn model for cifar mnist
def plot_accuracy_and_loss(train_model):
hist = train_model.history
acc = hist['accuracy']
val_acc = hist['val_accuracy']
loss = hist['loss']
val_loss = hist['val_loss']
epochs = list(range(1,len(acc)+1))
trace_ta = create_trace(epochs,acc,"Training accuracy", "Green")
trace_va = create_trace(epochs,val_acc,"Validation accuracy", "Red")
trace_tl = create_trace(epochs,loss,"Training loss", "Blue")
trace_vl = create_trace(epochs,val_loss,"Validation loss", "Magenta")
fig = make_subplots(rows=1,cols=2, subplot_titles=('Training and validation accuracy',
'Training and validation loss'))
fig.append_trace(trace_ta,1,1)
fig.append_trace(trace_va,1,1)
fig.append_trace(trace_tl,1,2)
fig.append_trace(trace_vl,1,2)
fig['layout']['xaxis'].update(title = 'Epoch')
fig['layout']['xaxis2'].update(title = 'Epoch')
fig['layout']['yaxis'].update(title = 'Accuracy', range=[0,1])
fig['layout']['yaxis2'].update(title = 'Loss', range=[0,1])
iplot(fig, filename=f'accuracy-loss_{train_model}')
# evaluate a model using k-fold cross-validation
def evaluate_model(model,dataX, dataY, n_folds=5,valX=X_val,valy=y_val,X_test=X_test,y_test=y_test):
scores, histories = list(), list()
# prepare cross validation
kfold = KFold(n_folds, shuffle=True, random_state=1)
# enumerate splits
for train_ix, test_ix in kfold.split(dataX):
# select rows for train and test
trainX, trainY, testX, testY = dataX[train_ix], dataY[train_ix], dataX[test_ix], dataY[test_ix]
h_callback = EarlyStopping(monitor='val_loss', patience=5)
# fit model
history = model.fit(trainX, trainY, epochs=50, validation_data=(testX, testY), verbose=0,callbacks=[h_callback], batch_size=32)
# evaluate model
_, acc = model.evaluate(X_test, y_test, verbose=0)
print('> %.3f' % (acc * 100.0))
# append scores
scores.append(acc)
histories.append(history)
return scores, histories
# plot diagnostic learning curves
def summarize_diagnostics(histories):
for i in range(len(histories)):
# plot loss
pyplot.subplot(211)
pyplot.title('Cross Entropy Loss')
pyplot.plot(histories[i].history['loss'], color='blue', label='train')
pyplot.plot(histories[i].history['val_loss'], color='orange', label='test')
# plot accuracy
pyplot.subplot(212)
pyplot.title('Classification Accuracy')
pyplot.plot(histories[i].history['accuracy'], color='blue', label='train')
pyplot.plot(histories[i].history['val_accuracy'], color='orange', label='test')
pyplot.legend()
pyplot.show()
# summarize model performance
def summarize_performance(scores):
# print summary
print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores)))
# box and whisker plots of results
pyplot.boxplot(scores)
pyplot.show()
# run the test harness for evaluating a model
def run_test_harness(model,X_train, y_train):
# evaluate model
scores, histories = evaluate_model(model,X_train, y_train)
# learning curves
summarize_diagnostics(histories)
# summarize estimated performance
summarize_performance(scores)
# ran into memory leakage problem trying to fix with this
gc.collect()
tf.keras.backend.clear_session()
del model
def plot_loss(loss,test_loss):
plt.figure()
plt.plot(loss)
plt.plot(test_loss)
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
def plot_accuracy(acc,test_acc):
plt.figure()
plt.plot(acc)
plt.plot(test_acc)
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper right')
plt.show()
# function to get precision, recall and f1 score
def get_metrics(model,X_test = X_test):
predictions = np.argmax(model.predict(X_test), axis=-1)
precision = precision_score(y_test_labels, predictions, average='macro')
recall = recall_score(y_test_labels, predictions, average='macro')
f1 = f1_score(y_test_labels, predictions, average='macro')
return precision,recall,f1
def model_max_pooling():
# define model
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=(32,32,3), padding='same'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu' , padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3), activation='relu' , padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# compile model
model.compile(loss='categorical_crossentropy', optimizer=tensorflow.keras.optimizers.Adam(),metrics=['accuracy'])
model.summary()
return model
model_max_pooling = model_max_pooling()
run_test_harness(model_max_pooling,X_train, y_train)
model_max_pooling.fit(X_train, y_train, epochs=50, batch_size=32, verbose=0)
test_loss, test_acc = model_max_pooling.evaluate(X_test, y_test, verbose=2)
precision,recall,f1 = get_metrics(model_max_pooling)
Model_scores = pd.concat([Model_scores,pd.DataFrame([['simple CNN relu max pooling',test_acc,test_loss,precision,recall,f1]],columns=Model_scores.columns)],ignore_index=True)
visualkeras.layered_view(model_max_pooling)
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 32) 896
max_pooling2d (MaxPooling2D (None, 16, 16, 32) 0
)
conv2d_1 (Conv2D) (None, 16, 16, 64) 18496
max_pooling2d_1 (MaxPooling (None, 8, 8, 64) 0
2D)
conv2d_2 (Conv2D) (None, 8, 8, 128) 73856
max_pooling2d_2 (MaxPooling (None, 4, 4, 128) 0
2D)
conv2d_3 (Conv2D) (None, 4, 4, 256) 295168
max_pooling2d_3 (MaxPooling (None, 2, 2, 256) 0
2D)
flatten (Flatten) (None, 1024) 0
dense (Dense) (None, 128) 131200
dense_1 (Dense) (None, 100) 12900
=================================================================
Total params: 532,516
Trainable params: 532,516
Non-trainable params: 0
_________________________________________________________________
> 31.930
> 31.170
> 29.850
> 28.940
> 29.100
Accuracy: mean=30.198 std=1.171, n=5
313/313 - 1s - loss: 10.6067 - accuracy: 0.2764 - 1s/epoch - 4ms/step 313/313 [==============================] - 1s 3ms/step
from keras.layers import AveragePooling2D
def model_average_pooling():
# define model
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=(32,32,3) , padding='same'))
model.add(AveragePooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(AveragePooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(AveragePooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu', padding ='same') )
model.add(AveragePooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# compile model
model.compile(loss='categorical_crossentropy', optimizer=tensorflow.keras.optimizers.Adam(),metrics=['accuracy'])
return model
model_average_pooling = model_average_pooling()
run_test_harness(model_average_pooling,X_train, y_train)
model_average_pooling.fit(X_train, y_train, epochs=50, batch_size=32, verbose=0)
test_loss , test_acc = model_average_pooling.evaluate(X_test, y_test, verbose=0)
precision,recall,f1 = get_metrics(model_average_pooling)
Model_scores = pd.concat([Model_scores,pd.DataFrame([['simple CNN relu avg pooling',test_acc,test_loss,precision,recall,f1]],columns=Model_scores.columns)],ignore_index=True)
> 35.570 > 35.720 > 35.100 > 35.180 > 33.640
Accuracy: mean=35.042 std=0.738, n=5
313/313 [==============================] - 1s 4ms/step
def model_avg_mix_max_pool():
# define model
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=(32,32,3) , padding='same'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu' , padding='same'))
model.add(AveragePooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu' , padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu' , padding='same'))
model.add(AveragePooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# compile model
model.compile(loss='categorical_crossentropy', optimizer=tensorflow.keras.optimizers.Adam(),metrics=['accuracy'])
return model
model_avgmax_pooling = model_avg_mix_max_pool()
run_test_harness(model_avgmax_pooling,X_train, y_train)
model_avgmax_pooling.fit(X_train, y_train, epochs=50, batch_size=32, verbose=0)
test_loss , test_acc = model_avgmax_pooling.evaluate(X_test, y_test, verbose=0)
precision,recall,f1 = get_metrics(model_avgmax_pooling)
Model_scores = pd.concat([Model_scores,pd.DataFrame([['simple CNN relu avg mix max pooling',test_acc,test_loss,precision,recall,f1]],columns=Model_scores.columns)],ignore_index=True)
> 35.740 > 36.670 > 34.450 > 32.420 > 32.530
Accuracy: mean=34.362 std=1.695, n=5
313/313 [==============================] - 1s 3ms/step
I did not expect this but it seems like Average Pooling is working better than Max Pooling , I will have keep this in mind and retry Average Pooling on a deeper network later.

Padding is important as it allows the network to learn the edges of the image. Without padding the network will not be able to learn the edges of the image and will only learn the center of the image.
def model_padding():
# define model
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=(32,32,3), padding='same'))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# compile model
model.compile(loss='categorical_crossentropy', optimizer=tensorflow.keras.optimizers.Adam(),metrics=['accuracy'])
return model
model_average_pooling_padding = model_padding()
visualkeras.layered_view(model_average_pooling_padding)
run_test_harness(model_average_pooling_padding,X_train, y_train)
model_average_pooling_padding.fit(X_train, y_train, epochs=50, batch_size=32, verbose=0)
test_loss, test_acc = model_average_pooling_padding.evaluate(X_test, y_test, verbose=0)
precision,recall,f1 = get_metrics(model_average_pooling_padding)
Model_scores = pd.concat([Model_scores,pd.DataFrame([['CNN relu avg pooling w same padding ',test_acc,test_loss,precision,recall,f1]],columns=Model_scores.columns)],ignore_index=True)
> 33.820 > 33.650 > 31.330 > 31.490 > 30.030
Accuracy: mean=32.064 std=1.456, n=5
313/313 [==============================] - 1s 3ms/step
visualkeras.layered_view(model_average_pooling_padding)
def model_validpadding():
# define model
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=(32,32,3), padding='valid'))
model.add(MaxPooling2D((2, 2), padding='valid'))
model.add(Conv2D(64, (3, 3), activation='relu', padding='valid'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='valid'))
model.add(Conv2D(128, (3, 3), activation='relu', padding='valid'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='valid'))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# compile model
model.compile(loss='categorical_crossentropy', optimizer=tensorflow.keras.optimizers.Adam(),metrics=['accuracy'])
return model
modelmaxpool_valid_padding = model_validpadding()
run_test_harness(modelmaxpool_valid_padding,X_train, y_train)
modelmaxpool_valid_padding.fit(X_train, y_train, epochs=50, batch_size=32, verbose=0)
test_loss, test_acc = modelmaxpool_valid_padding.evaluate(X_test, y_test, verbose=0)
precision,recall,f1 = get_metrics(modelmaxpool_valid_padding)
Model_scores = pd.concat([Model_scores,pd.DataFrame([['CNN relu avg pooling w valid padding ',test_acc,test_loss,precision,recall,f1]],columns=Model_scores.columns)],ignore_index=True)
> 32.930 > 34.110 > 33.960 > 33.200 > 32.330
Accuracy: mean=33.306 std=0.660, n=5
313/313 [==============================] - 1s 3ms/step
visualkeras.layered_view(modelmaxpool_valid_padding)

Default is valid padding which means no padding is added to the image. The image is reduced in size as the filter is moved across the image. However I will have to take note that with valid padding the image will be reduced in size and the model will not be able to learn as much as it would with padding. On top of that , the output shrinks as the filter is moved across the image with a stride of 1 if using valid padding and may cause an error if there are too many convolutional layers because the image will become of size 0 so will still have to take note of using 'same' padding.
from keras.layers import BatchNormalization
from keras.layers import Activation
def cifar_model_no_batch_norm():
model = Sequential()
model.add(Conv2D(256,(3,3),padding='same',input_shape=(32,32,3)))
model.add(Activation('relu'))
model.add(Conv2D(256,(3,3),padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(100,activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adamax',
metrics=['accuracy'])
return model
cifar_model_no_batch_norm = cifar_model_no_batch_norm()
run_test_harness(cifar_model_no_batch_norm,X_train, y_train)
cifar_model_no_batch_norm.fit(X_train, y_train, epochs=50, batch_size=32, verbose=0)
test_loss, test_acc = cifar_model_no_batch_norm.evaluate(X_test, y_test, verbose=0)
precision,recall,f1 = get_metrics(cifar_model_no_batch_norm)
Model_scores = pd.concat([Model_scores,pd.DataFrame([['deep CNN relu adamax no batch norm',test_acc,test_loss,precision,recall,f1]],columns=Model_scores.columns)],ignore_index=True)
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,676,836
Trainable params: 14,676,836
Non-trainable params: 0
_________________________________________________________________
> 40.870
> 42.490
> 42.630
> 43.030
> 42.920
Accuracy: mean=42.388 std=0.783, n=5
313/313 [==============================] - 6s 19ms/step
visualkeras.layered_view(cifar_model_no_batch_norm)
del cifar_model_no_batch_norm
gc.collect()
tf.keras.backend.clear_session()
%%time
from keras.layers import BatchNormalization,MaxPool2D
def cifar_model_batch_norm():
model = Sequential()
model.add(Conv2D(256,(3,3),padding='same',input_shape=(32,32,3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(256,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(BatchNormalization(momentum=0.95,
epsilon=0.005,
beta_initializer=RandomNormal(mean=0.0, stddev=0.05),
gamma_initializer=Constant(value=0.9)))
model.add(Dense(100,activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adamax',
metrics=['accuracy'])
return model
cifar_model_batch_norm = cifar_model_batch_norm()
run_test_harness(cifar_model_batch_norm,X_train, y_train)
cifar_model_batch_norm.fit(X_train, y_train, epochs=50, batch_size=32, verbose=0)
test_loss, test_acc = cifar_model_batch_norm.evaluate(X_test, y_test, verbose=0)
precision,recall,f1 = get_metrics(cifar_model_batch_norm)
Model_scores = pd.concat([Model_scores,pd.DataFrame([['deep CNN relu adamax w batch norm',test_acc,test_loss,precision,recall,f1]],columns=Model_scores.columns)],ignore_index=True)
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
visualkeras.layered_view(cifar_model_batch_norm)
# del cifar_model_batch_norm
# gc.collect()
# tf.keras.backend.clear_session()
ALthough it is randomly dropping neruons but it generally improves the model performance. It will be good if it drops a bad neuron but even if it drops a good neuron the model still will be able to learn from other features.
def cifar_model_batch_norm_dropout():
model = Sequential()
model.add(Conv2D(256,(3,3),padding='same',input_shape=(32,32,3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(256,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization(momentum=0.95,
epsilon=0.005,
beta_initializer=RandomNormal(mean=0.0, stddev=0.05),
gamma_initializer=Constant(value=0.9)))
model.add(Dense(100,activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adamax',
metrics=['accuracy'])
return model
cifar_dropout = cifar_model_batch_norm_dropout()
run_test_harness(cifar_dropout,X_train, y_train)
cifar_dropout.fit(X_train, y_train, epochs=50, batch_size=32, verbose=0)
test_loss, test_acc = cifar_dropout.evaluate(X_test,y_test)
precision,recall,f1 = get_metrics(cifar_dropout)
Model_scores = pd.concat([Model_scores,pd.DataFrame([['CNN relu avg pooling w padding ',test_acc,test_loss,precision,recall,f1]],columns=Model_scores.columns)],ignore_index=True)
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
> 58.150
> 61.200
> 61.100
> 60.970
> 62.340
Accuracy: mean=60.752 std=1.390, n=5
313/313 [==============================] - 5s 15ms/step - loss: 2.3846 - accuracy: 0.6358 313/313 [==============================] - 4s 13ms/step
print(error)
visualkeras.layered_view(cifar_dropout)
# pause program
# del cifar_dropout
# gc.collect()
# tf.keras.backend.clear_session()
### function for looping through all the models
def tune_model_act_opt(act,opt):
model = Sequential()
model.add(Conv2D(256,(3,3),padding='same',input_shape=(32,32,3), kernel_initializer=keras.initializers.GlorotNormal(seed=42)))
model.add(BatchNormalization())
model.add(Activation(act))
model.add(Conv2D(256,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation(act))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation(act))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation(act))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation(act))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation(act))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation(act))
model.add(Conv2D(512,(3,3),padding='same'))
model.add(BatchNormalization())
model.add(Activation(act))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation(act))
model.add(Dropout(0.2))
model.add(BatchNormalization(momentum=0.95,
epsilon=0.005,
beta_initializer=RandomNormal(mean=0.0, stddev=0.05),
gamma_initializer=Constant(value=0.9)))
model.add(Dense(100,activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
return model
# plot diagnostic learning curves
def summarize_diagnostics(histories,act,opt):
# plot loss
pyplot.subplot(211)
pyplot.title(f'Cross Entropy Loss {act},{opt}')
pyplot.plot(histories.history['loss'], color='blue', label='train')
pyplot.plot(histories.history['val_loss'], color='orange', label='test')
# plot accuracy
pyplot.subplot(212)
pyplot.title(f'Classification Accuracy {act},{opt}')
pyplot.plot(histories.history['accuracy'], color='blue', label='train')
pyplot.plot(histories.history['val_accuracy'], color='orange', label='test')
pyplot.legend()
pyplot.show()
def run_test_harness_act_opt(act,opt,X_train, y_train, X_test, y_test,X_val,y_val):
model = tune_model_act_opt(act,opt)
# fit model
h_callback = EarlyStopping(monitor='val_accuracy', patience=5)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
history = model.fit(X_train, y_train, epochs=100, batch_size=128,validation_data=(X_val, y_val), verbose=0 ,callbacks=[h_callback,reduce_lr])
# evaluate model
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=0)
# ran into memory leakage problem trying to fix with this
gc.collect()
tf.keras.backend.clear_session()
del model
print('Test Accuracy> %.3f' % (test_acc * 100.0))
print('Test Loss> %.3f' % (test_loss * 100.0))
summarize_diagnostics(history,act,opt)
return (test_acc*100, test_loss*100)
from itertools import product
optimizers = ['adam', 'sgd', 'rmsprop', 'adagrad', 'adadelta', 'adamax', 'nadam']
activation = ['relu', 'selu', 'elu', 'tanh',LeakyReLU(alpha=0.01)]
accuracy_act_opt = list()
loss_act_opt = list()
activation_names = ['relu', 'selu', 'elu', 'tanh','LeakyReLU']
act_opt = list(product(activation_names,optimizers))
for act,opt in product(activation,optimizers):
acc , loss= run_test_harness_act_opt(act,opt,X_train, y_train, X_test, y_test,X_val,y_val)
accuracy_act_opt.append(acc)
loss_act_opt.append(loss)
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 56.680
Test Loss> 222.631
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 59.990
Test Loss> 147.696
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 58.850
Test Loss> 234.589
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 39.920
Test Loss> 236.549
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 20.990
Test Loss> 336.280
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 59.800
Test Loss> 176.211
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 58.290
Test Loss> 238.302
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 56.010
Test Loss> 233.398
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 58.750
Test Loss> 160.634
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 53.880
Test Loss> 203.681
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 53.160
Test Loss> 177.970
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 20.490
Test Loss> 344.175
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 57.580
Test Loss> 199.920
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 56.610
Test Loss> 238.583
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 57.220
Test Loss> 226.320
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 60.900
Test Loss> 148.233
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 56.670
Test Loss> 229.304
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 52.440
Test Loss> 179.750
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 18.390
Test Loss> 347.307
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 59.840
Test Loss> 163.957
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 58.710
Test Loss> 210.232
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 50.060
Test Loss> 301.374
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 50.800
Test Loss> 193.366
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 47.480
Test Loss> 265.439
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 42.370
Test Loss> 225.244
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 16.680
Test Loss> 367.074
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 53.610
Test Loss> 217.107
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 51.990
Test Loss> 249.734
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 59.920
Test Loss> 252.905
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 56.740
Test Loss> 164.970
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
Test Accuracy> 58.770
Test Loss> 234.372
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 256) 7168
batch_normalization (BatchN (None, 32, 32, 256) 1024
ormalization)
activation (Activation) (None, 32, 32, 256) 0
conv2d_1 (Conv2D) (None, 32, 32, 256) 590080
batch_normalization_1 (Batc (None, 32, 32, 256) 1024
hNormalization)
activation_1 (Activation) (None, 32, 32, 256) 0
max_pooling2d (MaxPooling2D (None, 16, 16, 256) 0
)
dropout (Dropout) (None, 16, 16, 256) 0
conv2d_2 (Conv2D) (None, 16, 16, 512) 1180160
batch_normalization_2 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_2 (Activation) (None, 16, 16, 512) 0
conv2d_3 (Conv2D) (None, 16, 16, 512) 2359808
batch_normalization_3 (Batc (None, 16, 16, 512) 2048
hNormalization)
activation_3 (Activation) (None, 16, 16, 512) 0
max_pooling2d_1 (MaxPooling (None, 8, 8, 512) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 512) 0
conv2d_4 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_4 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_4 (Activation) (None, 8, 8, 512) 0
conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808
batch_normalization_5 (Batc (None, 8, 8, 512) 2048
hNormalization)
activation_5 (Activation) (None, 8, 8, 512) 0
max_pooling2d_2 (MaxPooling (None, 4, 4, 512) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 512) 0
conv2d_6 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_6 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_6 (Activation) (None, 4, 4, 512) 0
conv2d_7 (Conv2D) (None, 4, 4, 512) 2359808
batch_normalization_7 (Batc (None, 4, 4, 512) 2048
hNormalization)
activation_7 (Activation) (None, 4, 4, 512) 0
max_pooling2d_3 (MaxPooling (None, 2, 2, 512) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 512) 0
flatten (Flatten) (None, 2048) 0
dense (Dense) (None, 512) 1049088
activation_8 (Activation) (None, 512) 0
dropout_4 (Dropout) (None, 512) 0
batch_normalization_8 (Batc (None, 512) 2048
hNormalization)
dense_1 (Dense) (None, 100) 51300
=================================================================
Total params: 14,693,220
Trainable params: 14,685,028
Non-trainable params: 8,192
_________________________________________________________________
--------------------------------------------------------------------------- InternalError Traceback (most recent call last) Cell In [23], line 10 7 act_opt = list(product(activation_names,optimizers)) 9 for act,opt in product(activation,optimizers): ---> 10 acc , loss= run_test_harness_act_opt(act,opt,X_train, y_train, X_test, y_test,X_val,y_val) 11 accuracy_act_opt.append(acc) 12 loss_act_opt.append(loss) Cell In [22], line 78, in run_test_harness_act_opt(act, opt, X_train, y_train, X_test, y_test, X_val, y_val) 75 h_callback = EarlyStopping(monitor='val_accuracy', patience=5) 76 reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, 77 patience=5, min_lr=0.001) ---> 78 history = model.fit(X_train, y_train, epochs=100, batch_size=128,validation_data=(X_val, y_val), verbose=0 ,callbacks=[h_callback,reduce_lr]) 79 # evaluate model 80 test_loss, test_acc = model.evaluate(X_test, y_test, verbose=0) File c:\Users\Admin\.conda\envs\gpu_env\lib\site-packages\keras\utils\traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs) 67 filtered_tb = _process_traceback_frames(e.__traceback__) 68 # To get the full stack trace, call: 69 # `tf.debugging.disable_traceback_filtering()` ---> 70 raise e.with_traceback(filtered_tb) from None 71 finally: 72 del filtered_tb File c:\Users\Admin\.conda\envs\gpu_env\lib\site-packages\tensorflow\python\framework\ops.py:1125, in _EagerTensorBase._numpy(self) 1123 return self._numpy_internal() 1124 except core._NotOkStatusException as e: # pylint: disable=protected-access -> 1125 raise core._status_to_exception(e) from None InternalError: Could not synchronize CUDA stream: CUDA_ERROR_ILLEGAL_ADDRESS: an illegal memory access was encountered
# create a dataframe of scores_act_opt and act_opt
df_act_opt = pd.DataFrame({'act_opt':act_opt,'accuracy':accuracy_act_opt,'loss':loss_act_opt})
# highlight highest score_act_opt in df_act_opt
df_act_opt.style.highlight_max(subset=['accuracy'],color='green', axis=0).highlight_min(subset=['loss'],color='green', axis=0)
# style only score_act_opt in df_act_opt
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) Cell In [24], line 2 1 # create a dataframe of scores_act_opt and act_opt ----> 2 df_act_opt = pd.DataFrame({'act_opt':act_opt,'accuracy':accuracy_act_opt,'loss':loss_act_opt}) 3 # highlight highest score_act_opt in df_act_opt 4 df_act_opt.style.highlight_max(subset=['accuracy'],color='green', axis=0).highlight_min(subset=['loss'],color='green', axis=0) File c:\Users\Admin\.conda\envs\gpu_env\lib\site-packages\pandas\core\frame.py:636, in DataFrame.__init__(self, data, index, columns, dtype, copy) 630 mgr = self._init_mgr( 631 data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy 632 ) 634 elif isinstance(data, dict): 635 # GH#38939 de facto copy defaults to False only in non-dict cases --> 636 mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) 637 elif isinstance(data, ma.MaskedArray): 638 import numpy.ma.mrecords as mrecords File c:\Users\Admin\.conda\envs\gpu_env\lib\site-packages\pandas\core\internals\construction.py:502, in dict_to_mgr(data, index, columns, dtype, typ, copy) 494 arrays = [ 495 x 496 if not hasattr(x, "dtype") or not isinstance(x.dtype, ExtensionDtype) 497 else x.copy() 498 for x in arrays 499 ] 500 # TODO: can we get rid of the dt64tz special case above? --> 502 return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy) File c:\Users\Admin\.conda\envs\gpu_env\lib\site-packages\pandas\core\internals\construction.py:120, in arrays_to_mgr(arrays, columns, index, dtype, verify_integrity, typ, consolidate) 117 if verify_integrity: 118 # figure out the index, if necessary 119 if index is None: --> 120 index = _extract_index(arrays) 121 else: 122 index = ensure_index(index) File c:\Users\Admin\.conda\envs\gpu_env\lib\site-packages\pandas\core\internals\construction.py:674, in _extract_index(data) 672 lengths = list(set(raw_lengths)) 673 if len(lengths) > 1: --> 674 raise ValueError("All arrays must be of the same length") 676 if have_dicts: 677 raise ValueError( 678 "Mixing dicts with non-Series may lead to ambiguous ordering." 679 ) ValueError: All arrays must be of the same length
Model_scores
| Model | Accuracy | Loss | Precision | Recall | F1 Score | |
|---|---|---|---|---|---|---|
| 0 | CNN baseline | 0.0937 | 16.984434 | 0.094939 | 0.0937 | 0.090659 |
| 1 | simple CNN relu max pooling | 0.0937 | 16.984434 | 0.289353 | 0.2780 | 0.279113 |
| 2 | simple CNN relu avg pooling | 0.0937 | 16.984434 | 0.329643 | 0.3260 | 0.324001 |
| 3 | simple CNN relu avg mix max pooling | 0.0937 | 16.984434 | 0.310743 | 0.3080 | 0.305592 |
| 4 | CNN relu avg pooling w same padding | 0.0937 | 16.984434 | 0.277890 | 0.2760 | 0.274180 |
| 5 | CNN relu avg pooling w valid padding | 0.0937 | 16.984434 | 0.301405 | 0.3016 | 0.296297 |
| 6 | deep CNN relu adamax no batch norm | 0.0937 | 16.984434 | 0.418090 | 0.4177 | 0.412984 |
| 7 | deep CNN relu adamax w batch norm | 0.0937 | 16.984434 | 0.586022 | 0.5836 | 0.581206 |
| 8 | CNN relu avg pooling w padding | 0.0937 | 16.984434 | 0.645589 | 0.6399 | 0.638492 |
model = Sequential()
# Convolutional Layer
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.25))
# Convolutional Layer
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.25))
# Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=256, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.25))
model.add(Flatten())
# model.add(Dropout(0.2))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(100, activation='softmax',kernel_regularizer=tensorflow.keras.regularizers.L1(0.01)))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
h_callback = model.fit(X_train, y_train, epochs = 300,validation_data=(X_val, y_val),batch_size=256,callbacks=[early_stopping])
# Plot train vs test loss during training
plot_loss(h_callback.history['loss'], h_callback.history['val_loss'])
plot_accuracy(h_callback.history['accuracy'], h_callback.history['val_accuracy'])
# evaluate model
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=0)
precision,recall,f1 = get_metrics(model)
Model_scores = pd.concat([Model_scores,pd.DataFrame([['deep CNN elu sgd batchnorm dropout l1',test_acc,test_loss,precision,recall,f1]],columns=Model_scores.columns)],ignore_index=True)
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 32) 896
batch_normalization (BatchN (None, 32, 32, 32) 128
ormalization)
conv2d_1 (Conv2D) (None, 32, 32, 32) 9248
batch_normalization_1 (Batc (None, 32, 32, 32) 128
hNormalization)
max_pooling2d (MaxPooling2D (None, 16, 16, 32) 0
)
dropout (Dropout) (None, 16, 16, 32) 0
conv2d_2 (Conv2D) (None, 16, 16, 64) 18496
batch_normalization_2 (Batc (None, 16, 16, 64) 256
hNormalization)
conv2d_3 (Conv2D) (None, 16, 16, 64) 36928
batch_normalization_3 (Batc (None, 16, 16, 64) 256
hNormalization)
max_pooling2d_1 (MaxPooling (None, 8, 8, 64) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 64) 0
conv2d_4 (Conv2D) (None, 8, 8, 128) 73856
batch_normalization_4 (Batc (None, 8, 8, 128) 512
hNormalization)
conv2d_5 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_5 (Batc (None, 8, 8, 128) 512
hNormalization)
conv2d_6 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_6 (Batc (None, 8, 8, 128) 512
hNormalization)
conv2d_7 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_7 (Batc (None, 8, 8, 128) 512
hNormalization)
max_pooling2d_2 (MaxPooling (None, 4, 4, 128) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 128) 0
conv2d_8 (Conv2D) (None, 4, 4, 256) 295168
batch_normalization_8 (Batc (None, 4, 4, 256) 1024
hNormalization)
conv2d_9 (Conv2D) (None, 4, 4, 256) 590080
batch_normalization_9 (Batc (None, 4, 4, 256) 1024
hNormalization)
max_pooling2d_3 (MaxPooling (None, 2, 2, 256) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 256) 0
flatten (Flatten) (None, 1024) 0
dense (Dense) (None, 128) 131200
dropout_4 (Dropout) (None, 128) 0
dense_1 (Dense) (None, 100) 12900
=================================================================
Total params: 1,616,388
Trainable params: 1,613,956
Non-trainable params: 2,432
_________________________________________________________________
Epoch 1/300
313/313 [==============================] - 24s 47ms/step - loss: 13.0289 - accuracy: 0.0264 - val_loss: 11.3846 - val_accuracy: 0.0181
Epoch 2/300
313/313 [==============================] - 14s 43ms/step - loss: 9.5193 - accuracy: 0.0561 - val_loss: 8.0292 - val_accuracy: 0.0735
Epoch 3/300
313/313 [==============================] - 14s 43ms/step - loss: 6.8861 - accuracy: 0.0767 - val_loss: 5.9739 - val_accuracy: 0.0762
Epoch 4/300
313/313 [==============================] - 14s 44ms/step - loss: 5.2003 - accuracy: 0.0868 - val_loss: 4.6510 - val_accuracy: 0.0986
Epoch 5/300
313/313 [==============================] - 14s 44ms/step - loss: 4.4333 - accuracy: 0.0905 - val_loss: 4.2914 - val_accuracy: 0.0972
Epoch 6/300
313/313 [==============================] - 14s 43ms/step - loss: 4.2300 - accuracy: 0.0902 - val_loss: 4.2496 - val_accuracy: 0.0934
Epoch 7/300
313/313 [==============================] - 14s 44ms/step - loss: 4.1757 - accuracy: 0.0936 - val_loss: 4.2420 - val_accuracy: 0.0840
Epoch 8/300
313/313 [==============================] - 14s 44ms/step - loss: 4.1379 - accuracy: 0.0945 - val_loss: 4.3998 - val_accuracy: 0.0627
Epoch 9/300
313/313 [==============================] - 14s 43ms/step - loss: 4.1050 - accuracy: 0.0985 - val_loss: 4.1698 - val_accuracy: 0.0949
Epoch 10/300
313/313 [==============================] - 14s 43ms/step - loss: 4.0802 - accuracy: 0.1021 - val_loss: 4.2145 - val_accuracy: 0.0830
Epoch 11/300
313/313 [==============================] - 14s 43ms/step - loss: 4.0528 - accuracy: 0.1066 - val_loss: 4.0291 - val_accuracy: 0.1091
Epoch 12/300
313/313 [==============================] - 14s 44ms/step - loss: 4.0293 - accuracy: 0.1091 - val_loss: 4.0198 - val_accuracy: 0.1176
Epoch 13/300
313/313 [==============================] - 14s 44ms/step - loss: 4.0074 - accuracy: 0.1120 - val_loss: 4.0488 - val_accuracy: 0.1124
Epoch 14/300
313/313 [==============================] - 14s 44ms/step - loss: 3.9821 - accuracy: 0.1160 - val_loss: 4.1303 - val_accuracy: 0.0978
Epoch 15/300
313/313 [==============================] - 14s 44ms/step - loss: 3.9611 - accuracy: 0.1190 - val_loss: 4.1807 - val_accuracy: 0.0941
Epoch 16/300
313/313 [==============================] - 14s 44ms/step - loss: 3.9369 - accuracy: 0.1245 - val_loss: 3.9809 - val_accuracy: 0.1210
Epoch 17/300
313/313 [==============================] - 14s 43ms/step - loss: 3.9199 - accuracy: 0.1264 - val_loss: 4.0032 - val_accuracy: 0.1190
Epoch 18/300
313/313 [==============================] - 14s 44ms/step - loss: 3.8965 - accuracy: 0.1305 - val_loss: 3.8945 - val_accuracy: 0.1287
Epoch 19/300
313/313 [==============================] - 14s 43ms/step - loss: 3.8711 - accuracy: 0.1343 - val_loss: 3.8659 - val_accuracy: 0.1363
Epoch 20/300
313/313 [==============================] - 14s 43ms/step - loss: 3.8577 - accuracy: 0.1371 - val_loss: 3.8120 - val_accuracy: 0.1444
Epoch 21/300
313/313 [==============================] - 14s 44ms/step - loss: 3.8240 - accuracy: 0.1436 - val_loss: 3.8800 - val_accuracy: 0.1429
Epoch 22/300
313/313 [==============================] - 14s 44ms/step - loss: 3.8099 - accuracy: 0.1450 - val_loss: 3.8629 - val_accuracy: 0.1410
Epoch 23/300
313/313 [==============================] - 14s 43ms/step - loss: 3.7892 - accuracy: 0.1483 - val_loss: 3.8669 - val_accuracy: 0.1445
Epoch 24/300
313/313 [==============================] - 14s 44ms/step - loss: 3.7718 - accuracy: 0.1521 - val_loss: 3.7734 - val_accuracy: 0.1491
Epoch 25/300
313/313 [==============================] - 14s 44ms/step - loss: 3.7521 - accuracy: 0.1539 - val_loss: 3.9280 - val_accuracy: 0.1287
Epoch 26/300
313/313 [==============================] - 14s 44ms/step - loss: 3.7326 - accuracy: 0.1576 - val_loss: 3.7740 - val_accuracy: 0.1495
Epoch 27/300
313/313 [==============================] - 14s 44ms/step - loss: 3.7130 - accuracy: 0.1609 - val_loss: 3.7103 - val_accuracy: 0.1648
Epoch 28/300
313/313 [==============================] - 14s 44ms/step - loss: 3.6949 - accuracy: 0.1645 - val_loss: 3.7292 - val_accuracy: 0.1508
Epoch 29/300
313/313 [==============================] - 14s 44ms/step - loss: 3.6731 - accuracy: 0.1677 - val_loss: 3.5805 - val_accuracy: 0.1903
Epoch 30/300
313/313 [==============================] - 14s 44ms/step - loss: 3.6582 - accuracy: 0.1707 - val_loss: 3.6547 - val_accuracy: 0.1790
Epoch 31/300
313/313 [==============================] - 14s 44ms/step - loss: 3.6375 - accuracy: 0.1738 - val_loss: 3.6209 - val_accuracy: 0.1813
Epoch 32/300
313/313 [==============================] - 14s 44ms/step - loss: 3.6235 - accuracy: 0.1762 - val_loss: 3.5489 - val_accuracy: 0.1872
Epoch 33/300
313/313 [==============================] - 14s 44ms/step - loss: 3.6062 - accuracy: 0.1807 - val_loss: 3.5419 - val_accuracy: 0.1853
Epoch 34/300
313/313 [==============================] - 14s 44ms/step - loss: 3.5876 - accuracy: 0.1830 - val_loss: 3.4110 - val_accuracy: 0.2169
Epoch 35/300
313/313 [==============================] - 14s 44ms/step - loss: 3.5705 - accuracy: 0.1858 - val_loss: 3.4399 - val_accuracy: 0.2026
Epoch 36/300
313/313 [==============================] - 14s 43ms/step - loss: 3.5570 - accuracy: 0.1891 - val_loss: 3.4596 - val_accuracy: 0.1963
Epoch 37/300
313/313 [==============================] - 14s 46ms/step - loss: 3.5356 - accuracy: 0.1915 - val_loss: 3.3721 - val_accuracy: 0.2002
Epoch 38/300
313/313 [==============================] - 15s 46ms/step - loss: 3.5215 - accuracy: 0.1960 - val_loss: 3.3641 - val_accuracy: 0.2232
Epoch 39/300
313/313 [==============================] - 14s 46ms/step - loss: 3.5036 - accuracy: 0.1986 - val_loss: 3.3452 - val_accuracy: 0.2179
Epoch 40/300
313/313 [==============================] - 14s 46ms/step - loss: 3.4949 - accuracy: 0.2005 - val_loss: 3.4283 - val_accuracy: 0.2108
Epoch 41/300
313/313 [==============================] - 15s 47ms/step - loss: 3.4784 - accuracy: 0.2048 - val_loss: 3.3199 - val_accuracy: 0.2309
Epoch 42/300
313/313 [==============================] - 15s 48ms/step - loss: 3.4637 - accuracy: 0.2076 - val_loss: 3.3235 - val_accuracy: 0.2334
Epoch 43/300
313/313 [==============================] - 15s 48ms/step - loss: 3.4498 - accuracy: 0.2082 - val_loss: 3.3569 - val_accuracy: 0.2241
Epoch 44/300
313/313 [==============================] - 15s 48ms/step - loss: 3.4389 - accuracy: 0.2117 - val_loss: 3.2307 - val_accuracy: 0.2465
Epoch 45/300
313/313 [==============================] - 15s 48ms/step - loss: 3.4222 - accuracy: 0.2138 - val_loss: 3.2531 - val_accuracy: 0.2493
Epoch 46/300
313/313 [==============================] - 15s 46ms/step - loss: 3.4086 - accuracy: 0.2158 - val_loss: 3.2281 - val_accuracy: 0.2478
Epoch 47/300
313/313 [==============================] - 15s 47ms/step - loss: 3.3932 - accuracy: 0.2177 - val_loss: 3.1778 - val_accuracy: 0.2622
Epoch 48/300
313/313 [==============================] - 14s 46ms/step - loss: 3.3853 - accuracy: 0.2200 - val_loss: 3.2674 - val_accuracy: 0.2469
Epoch 49/300
313/313 [==============================] - 14s 46ms/step - loss: 3.3737 - accuracy: 0.2222 - val_loss: 3.1665 - val_accuracy: 0.2663
Epoch 50/300
313/313 [==============================] - 14s 46ms/step - loss: 3.3646 - accuracy: 0.2263 - val_loss: 3.1209 - val_accuracy: 0.2710
Epoch 51/300
313/313 [==============================] - 15s 46ms/step - loss: 3.3457 - accuracy: 0.2292 - val_loss: 3.1791 - val_accuracy: 0.2530
Epoch 52/300
313/313 [==============================] - 15s 47ms/step - loss: 3.3393 - accuracy: 0.2306 - val_loss: 3.1001 - val_accuracy: 0.2710
Epoch 53/300
313/313 [==============================] - 14s 46ms/step - loss: 3.3230 - accuracy: 0.2330 - val_loss: 3.0398 - val_accuracy: 0.2943
Epoch 54/300
313/313 [==============================] - 14s 46ms/step - loss: 3.3118 - accuracy: 0.2343 - val_loss: 3.1654 - val_accuracy: 0.2639
Epoch 55/300
313/313 [==============================] - 15s 47ms/step - loss: 3.3042 - accuracy: 0.2389 - val_loss: 3.1010 - val_accuracy: 0.2691
Epoch 56/300
313/313 [==============================] - 15s 48ms/step - loss: 3.2943 - accuracy: 0.2384 - val_loss: 3.0524 - val_accuracy: 0.2808
Epoch 57/300
313/313 [==============================] - 15s 48ms/step - loss: 3.2722 - accuracy: 0.2423 - val_loss: 3.1358 - val_accuracy: 0.2731
Epoch 58/300
313/313 [==============================] - 15s 48ms/step - loss: 3.2698 - accuracy: 0.2431 - val_loss: 3.0387 - val_accuracy: 0.2872
Epoch 59/300
313/313 [==============================] - 15s 48ms/step - loss: 3.2567 - accuracy: 0.2473 - val_loss: 3.0374 - val_accuracy: 0.2878
Epoch 60/300
313/313 [==============================] - 15s 48ms/step - loss: 3.2399 - accuracy: 0.2492 - val_loss: 2.9811 - val_accuracy: 0.3113
Epoch 61/300
313/313 [==============================] - 15s 48ms/step - loss: 3.2333 - accuracy: 0.2527 - val_loss: 2.9938 - val_accuracy: 0.3006
Epoch 62/300
313/313 [==============================] - 15s 47ms/step - loss: 3.2235 - accuracy: 0.2532 - val_loss: 2.9713 - val_accuracy: 0.3020
Epoch 63/300
313/313 [==============================] - 14s 45ms/step - loss: 3.2111 - accuracy: 0.2535 - val_loss: 2.9510 - val_accuracy: 0.3021
Epoch 64/300
313/313 [==============================] - 14s 44ms/step - loss: 3.2015 - accuracy: 0.2569 - val_loss: 2.9148 - val_accuracy: 0.3083
Epoch 65/300
313/313 [==============================] - 14s 44ms/step - loss: 3.1935 - accuracy: 0.2573 - val_loss: 2.9743 - val_accuracy: 0.3119
Epoch 66/300
313/313 [==============================] - 14s 44ms/step - loss: 3.1840 - accuracy: 0.2599 - val_loss: 2.8540 - val_accuracy: 0.3222
Epoch 67/300
313/313 [==============================] - 14s 44ms/step - loss: 3.1710 - accuracy: 0.2624 - val_loss: 2.9236 - val_accuracy: 0.3149
Epoch 68/300
313/313 [==============================] - 14s 44ms/step - loss: 3.1655 - accuracy: 0.2643 - val_loss: 2.8956 - val_accuracy: 0.2986
Epoch 69/300
313/313 [==============================] - 14s 44ms/step - loss: 3.1562 - accuracy: 0.2655 - val_loss: 2.9700 - val_accuracy: 0.3066
Epoch 70/300
313/313 [==============================] - 14s 44ms/step - loss: 3.1451 - accuracy: 0.2696 - val_loss: 2.8519 - val_accuracy: 0.3311
Epoch 71/300
313/313 [==============================] - 14s 44ms/step - loss: 3.1359 - accuracy: 0.2703 - val_loss: 2.8902 - val_accuracy: 0.3176
Epoch 72/300
313/313 [==============================] - 14s 44ms/step - loss: 3.1267 - accuracy: 0.2727 - val_loss: 2.8971 - val_accuracy: 0.3236
Epoch 73/300
313/313 [==============================] - 14s 44ms/step - loss: 3.1143 - accuracy: 0.2737 - val_loss: 2.9415 - val_accuracy: 0.3103
Epoch 74/300
313/313 [==============================] - 14s 44ms/step - loss: 3.1073 - accuracy: 0.2754 - val_loss: 2.8290 - val_accuracy: 0.3242
Epoch 75/300
313/313 [==============================] - 14s 44ms/step - loss: 3.0976 - accuracy: 0.2788 - val_loss: 2.8216 - val_accuracy: 0.3343
Epoch 76/300
313/313 [==============================] - 14s 44ms/step - loss: 3.0920 - accuracy: 0.2784 - val_loss: 2.8334 - val_accuracy: 0.3275
Epoch 77/300
313/313 [==============================] - 14s 44ms/step - loss: 3.0836 - accuracy: 0.2796 - val_loss: 2.8086 - val_accuracy: 0.3509
Epoch 78/300
313/313 [==============================] - 14s 44ms/step - loss: 3.0715 - accuracy: 0.2825 - val_loss: 2.7900 - val_accuracy: 0.3346
Epoch 79/300
313/313 [==============================] - 14s 44ms/step - loss: 3.0675 - accuracy: 0.2835 - val_loss: 2.7962 - val_accuracy: 0.3310
Epoch 80/300
313/313 [==============================] - 14s 44ms/step - loss: 3.0559 - accuracy: 0.2846 - val_loss: 2.8339 - val_accuracy: 0.3200
Epoch 81/300
313/313 [==============================] - 14s 44ms/step - loss: 3.0406 - accuracy: 0.2900 - val_loss: 2.7840 - val_accuracy: 0.3403
Epoch 82/300
313/313 [==============================] - 14s 44ms/step - loss: 3.0360 - accuracy: 0.2903 - val_loss: 2.7850 - val_accuracy: 0.3347
Epoch 83/300
313/313 [==============================] - 14s 44ms/step - loss: 3.0297 - accuracy: 0.2910 - val_loss: 2.7183 - val_accuracy: 0.3451
Epoch 84/300
313/313 [==============================] - 14s 43ms/step - loss: 3.0192 - accuracy: 0.2946 - val_loss: 2.7524 - val_accuracy: 0.3464
Epoch 85/300
313/313 [==============================] - 14s 44ms/step - loss: 3.0116 - accuracy: 0.2959 - val_loss: 2.7384 - val_accuracy: 0.3526
Epoch 86/300
313/313 [==============================] - 14s 44ms/step - loss: 3.0003 - accuracy: 0.2983 - val_loss: 2.8002 - val_accuracy: 0.3425
Epoch 87/300
313/313 [==============================] - 14s 44ms/step - loss: 2.9941 - accuracy: 0.2993 - val_loss: 2.6982 - val_accuracy: 0.3564
Epoch 88/300
313/313 [==============================] - 14s 44ms/step - loss: 2.9845 - accuracy: 0.3002 - val_loss: 2.7722 - val_accuracy: 0.3426
Epoch 89/300
313/313 [==============================] - 14s 46ms/step - loss: 2.9781 - accuracy: 0.3014 - val_loss: 2.7549 - val_accuracy: 0.3468
Epoch 90/300
313/313 [==============================] - 14s 44ms/step - loss: 2.9702 - accuracy: 0.3036 - val_loss: 2.6440 - val_accuracy: 0.3684
Epoch 91/300
313/313 [==============================] - 14s 44ms/step - loss: 2.9629 - accuracy: 0.3077 - val_loss: 2.7053 - val_accuracy: 0.3575
Epoch 92/300
313/313 [==============================] - 14s 44ms/step - loss: 2.9501 - accuracy: 0.3087 - val_loss: 2.6409 - val_accuracy: 0.3820
Epoch 93/300
313/313 [==============================] - 14s 44ms/step - loss: 2.9426 - accuracy: 0.3105 - val_loss: 2.6404 - val_accuracy: 0.3730
Epoch 94/300
313/313 [==============================] - 14s 44ms/step - loss: 2.9377 - accuracy: 0.3124 - val_loss: 2.7146 - val_accuracy: 0.3602
Epoch 95/300
313/313 [==============================] - 14s 44ms/step - loss: 2.9236 - accuracy: 0.3147 - val_loss: 2.7276 - val_accuracy: 0.3521
Epoch 96/300
313/313 [==============================] - 14s 44ms/step - loss: 2.9213 - accuracy: 0.3131 - val_loss: 2.6380 - val_accuracy: 0.3761
Epoch 97/300
313/313 [==============================] - 14s 44ms/step - loss: 2.9153 - accuracy: 0.3163 - val_loss: 2.6535 - val_accuracy: 0.3693
Epoch 98/300
313/313 [==============================] - 14s 44ms/step - loss: 2.9013 - accuracy: 0.3180 - val_loss: 2.6481 - val_accuracy: 0.3730
Epoch 99/300
313/313 [==============================] - 14s 44ms/step - loss: 2.8953 - accuracy: 0.3199 - val_loss: 2.6830 - val_accuracy: 0.3597
Epoch 100/300
313/313 [==============================] - 14s 44ms/step - loss: 2.8879 - accuracy: 0.3205 - val_loss: 2.6356 - val_accuracy: 0.3762
Epoch 101/300
313/313 [==============================] - 14s 44ms/step - loss: 2.8776 - accuracy: 0.3225 - val_loss: 2.7120 - val_accuracy: 0.3532
Epoch 102/300
313/313 [==============================] - 14s 44ms/step - loss: 2.8702 - accuracy: 0.3243 - val_loss: 2.6208 - val_accuracy: 0.3860
Epoch 103/300
313/313 [==============================] - 14s 44ms/step - loss: 2.8629 - accuracy: 0.3275 - val_loss: 2.6119 - val_accuracy: 0.3683
Epoch 104/300
313/313 [==============================] - 14s 44ms/step - loss: 2.8574 - accuracy: 0.3295 - val_loss: 2.5451 - val_accuracy: 0.3865
Epoch 105/300
313/313 [==============================] - 14s 44ms/step - loss: 2.8547 - accuracy: 0.3271 - val_loss: 2.5370 - val_accuracy: 0.3963
Epoch 106/300
313/313 [==============================] - 14s 44ms/step - loss: 2.8378 - accuracy: 0.3316 - val_loss: 2.5773 - val_accuracy: 0.3938
Epoch 107/300
313/313 [==============================] - 14s 44ms/step - loss: 2.8305 - accuracy: 0.3370 - val_loss: 2.5772 - val_accuracy: 0.3897
Epoch 108/300
313/313 [==============================] - 14s 44ms/step - loss: 2.8262 - accuracy: 0.3364 - val_loss: 2.5867 - val_accuracy: 0.3821
Epoch 109/300
313/313 [==============================] - 14s 44ms/step - loss: 2.8171 - accuracy: 0.3369 - val_loss: 2.6758 - val_accuracy: 0.3770
Epoch 110/300
313/313 [==============================] - 14s 44ms/step - loss: 2.8077 - accuracy: 0.3379 - val_loss: 2.5639 - val_accuracy: 0.3860
Epoch 111/300
313/313 [==============================] - 14s 44ms/step - loss: 2.8054 - accuracy: 0.3392 - val_loss: 2.5488 - val_accuracy: 0.3945
Epoch 112/300
313/313 [==============================] - 14s 44ms/step - loss: 2.8021 - accuracy: 0.3400 - val_loss: 2.5507 - val_accuracy: 0.3912
Epoch 113/300
313/313 [==============================] - 14s 44ms/step - loss: 2.7855 - accuracy: 0.3454 - val_loss: 2.6013 - val_accuracy: 0.3897
Epoch 114/300
313/313 [==============================] - 14s 44ms/step - loss: 2.7875 - accuracy: 0.3423 - val_loss: 2.6249 - val_accuracy: 0.3803
Epoch 115/300
313/313 [==============================] - 14s 44ms/step - loss: 2.7753 - accuracy: 0.3471 - val_loss: 2.5631 - val_accuracy: 0.3852
Epoch 116/300
313/313 [==============================] - 14s 44ms/step - loss: 2.7714 - accuracy: 0.3466 - val_loss: 2.5202 - val_accuracy: 0.4016
Epoch 117/300
313/313 [==============================] - 14s 44ms/step - loss: 2.7622 - accuracy: 0.3469 - val_loss: 2.5216 - val_accuracy: 0.4041
Epoch 118/300
313/313 [==============================] - 14s 44ms/step - loss: 2.7506 - accuracy: 0.3515 - val_loss: 2.5197 - val_accuracy: 0.4073
Epoch 119/300
313/313 [==============================] - 14s 44ms/step - loss: 2.7482 - accuracy: 0.3528 - val_loss: 2.5268 - val_accuracy: 0.3901
Epoch 120/300
313/313 [==============================] - 14s 44ms/step - loss: 2.7361 - accuracy: 0.3553 - val_loss: 2.4572 - val_accuracy: 0.4163
Epoch 121/300
313/313 [==============================] - 14s 44ms/step - loss: 2.7349 - accuracy: 0.3542 - val_loss: 2.5096 - val_accuracy: 0.4032
Epoch 122/300
313/313 [==============================] - 14s 44ms/step - loss: 2.7245 - accuracy: 0.3548 - val_loss: 2.4759 - val_accuracy: 0.4145
Epoch 123/300
313/313 [==============================] - 14s 44ms/step - loss: 2.7175 - accuracy: 0.3577 - val_loss: 2.5882 - val_accuracy: 0.3901
Epoch 124/300
313/313 [==============================] - 14s 44ms/step - loss: 2.7170 - accuracy: 0.3597 - val_loss: 2.5039 - val_accuracy: 0.4062
Epoch 125/300
313/313 [==============================] - 14s 44ms/step - loss: 2.7002 - accuracy: 0.3618 - val_loss: 2.4783 - val_accuracy: 0.4057
Epoch 126/300
313/313 [==============================] - 15s 48ms/step - loss: 2.6956 - accuracy: 0.3638 - val_loss: 2.4940 - val_accuracy: 0.4000
Epoch 127/300
313/313 [==============================] - 15s 49ms/step - loss: 2.6928 - accuracy: 0.3632 - val_loss: 2.4593 - val_accuracy: 0.4221
Epoch 128/300
313/313 [==============================] - 15s 48ms/step - loss: 2.6823 - accuracy: 0.3655 - val_loss: 2.5211 - val_accuracy: 0.4050
Epoch 129/300
313/313 [==============================] - 15s 49ms/step - loss: 2.6736 - accuracy: 0.3692 - val_loss: 2.5186 - val_accuracy: 0.3993
Epoch 130/300
313/313 [==============================] - 15s 49ms/step - loss: 2.6748 - accuracy: 0.3668 - val_loss: 2.4615 - val_accuracy: 0.4151
Epoch 131/300
313/313 [==============================] - 16s 50ms/step - loss: 2.6627 - accuracy: 0.3698 - val_loss: 2.3997 - val_accuracy: 0.4303
Epoch 132/300
313/313 [==============================] - 15s 48ms/step - loss: 2.6521 - accuracy: 0.3725 - val_loss: 2.4274 - val_accuracy: 0.4316
Epoch 133/300
313/313 [==============================] - 15s 47ms/step - loss: 2.6453 - accuracy: 0.3733 - val_loss: 2.4231 - val_accuracy: 0.4286
Epoch 134/300
313/313 [==============================] - 15s 47ms/step - loss: 2.6363 - accuracy: 0.3751 - val_loss: 2.5446 - val_accuracy: 0.4058
Epoch 135/300
313/313 [==============================] - 15s 47ms/step - loss: 2.6281 - accuracy: 0.3781 - val_loss: 2.4699 - val_accuracy: 0.4245
Epoch 136/300
313/313 [==============================] - 15s 47ms/step - loss: 2.6260 - accuracy: 0.3776 - val_loss: 2.3980 - val_accuracy: 0.4319
Epoch 137/300
313/313 [==============================] - 15s 47ms/step - loss: 2.6206 - accuracy: 0.3797 - val_loss: 2.4793 - val_accuracy: 0.4082
Epoch 138/300
313/313 [==============================] - 15s 47ms/step - loss: 2.6083 - accuracy: 0.3833 - val_loss: 2.5348 - val_accuracy: 0.4106
Epoch 139/300
313/313 [==============================] - 15s 47ms/step - loss: 2.6086 - accuracy: 0.3838 - val_loss: 2.4547 - val_accuracy: 0.4195
Epoch 140/300
313/313 [==============================] - 15s 47ms/step - loss: 2.6001 - accuracy: 0.3860 - val_loss: 2.4128 - val_accuracy: 0.4307
Epoch 141/300
313/313 [==============================] - 15s 47ms/step - loss: 2.5942 - accuracy: 0.3850 - val_loss: 2.3947 - val_accuracy: 0.4322
Epoch 142/300
313/313 [==============================] - 15s 48ms/step - loss: 2.5872 - accuracy: 0.3876 - val_loss: 2.3681 - val_accuracy: 0.4477
Epoch 143/300
313/313 [==============================] - 15s 47ms/step - loss: 2.5734 - accuracy: 0.3896 - val_loss: 2.4158 - val_accuracy: 0.4273
Epoch 144/300
313/313 [==============================] - 15s 47ms/step - loss: 2.5693 - accuracy: 0.3909 - val_loss: 2.4055 - val_accuracy: 0.4328
Epoch 145/300
313/313 [==============================] - 15s 47ms/step - loss: 2.5602 - accuracy: 0.3909 - val_loss: 2.4283 - val_accuracy: 0.4341
Epoch 146/300
313/313 [==============================] - 15s 47ms/step - loss: 2.5489 - accuracy: 0.3966 - val_loss: 2.3606 - val_accuracy: 0.4512
Epoch 147/300
313/313 [==============================] - 15s 47ms/step - loss: 2.5493 - accuracy: 0.3949 - val_loss: 2.3927 - val_accuracy: 0.4396
Epoch 148/300
313/313 [==============================] - 15s 47ms/step - loss: 2.5406 - accuracy: 0.3976 - val_loss: 2.4258 - val_accuracy: 0.4250
Epoch 149/300
313/313 [==============================] - 15s 47ms/step - loss: 2.5359 - accuracy: 0.3977 - val_loss: 2.3860 - val_accuracy: 0.4320
Epoch 150/300
313/313 [==============================] - 15s 48ms/step - loss: 2.5298 - accuracy: 0.3978 - val_loss: 2.3839 - val_accuracy: 0.4359
Epoch 151/300
313/313 [==============================] - 15s 47ms/step - loss: 2.5178 - accuracy: 0.4015 - val_loss: 2.4311 - val_accuracy: 0.4291
Epoch 152/300
313/313 [==============================] - 15s 47ms/step - loss: 2.5210 - accuracy: 0.4014 - val_loss: 2.3423 - val_accuracy: 0.4498
Epoch 153/300
313/313 [==============================] - 15s 47ms/step - loss: 2.5064 - accuracy: 0.4050 - val_loss: 2.4662 - val_accuracy: 0.4234
Epoch 154/300
313/313 [==============================] - 15s 47ms/step - loss: 2.5078 - accuracy: 0.4042 - val_loss: 2.3870 - val_accuracy: 0.4346
Epoch 155/300
313/313 [==============================] - 15s 47ms/step - loss: 2.4936 - accuracy: 0.4084 - val_loss: 2.3724 - val_accuracy: 0.4434
Epoch 156/300
313/313 [==============================] - 15s 47ms/step - loss: 2.4881 - accuracy: 0.4115 - val_loss: 2.5067 - val_accuracy: 0.4110
Epoch 157/300
313/313 [==============================] - 15s 47ms/step - loss: 2.4819 - accuracy: 0.4086 - val_loss: 2.3509 - val_accuracy: 0.4471
Epoch 158/300
313/313 [==============================] - 15s 47ms/step - loss: 2.4676 - accuracy: 0.4131 - val_loss: 2.3633 - val_accuracy: 0.4420
Epoch 159/300
313/313 [==============================] - 15s 47ms/step - loss: 2.4634 - accuracy: 0.4137 - val_loss: 2.4346 - val_accuracy: 0.4339
Epoch 160/300
313/313 [==============================] - 15s 47ms/step - loss: 2.4632 - accuracy: 0.4136 - val_loss: 2.3306 - val_accuracy: 0.4541
Epoch 161/300
313/313 [==============================] - 15s 47ms/step - loss: 2.4525 - accuracy: 0.4182 - val_loss: 2.3544 - val_accuracy: 0.4551
Epoch 162/300
313/313 [==============================] - 15s 47ms/step - loss: 2.4467 - accuracy: 0.4186 - val_loss: 2.3634 - val_accuracy: 0.4504
Epoch 163/300
313/313 [==============================] - 15s 47ms/step - loss: 2.4332 - accuracy: 0.4218 - val_loss: 2.4025 - val_accuracy: 0.4358
Epoch 164/300
313/313 [==============================] - 15s 47ms/step - loss: 2.4333 - accuracy: 0.4220 - val_loss: 2.3523 - val_accuracy: 0.4559
Epoch 165/300
313/313 [==============================] - 15s 47ms/step - loss: 2.4199 - accuracy: 0.4261 - val_loss: 2.3688 - val_accuracy: 0.4470
Epoch 166/300
313/313 [==============================] - 15s 47ms/step - loss: 2.4148 - accuracy: 0.4253 - val_loss: 2.4129 - val_accuracy: 0.4306
Epoch 167/300
313/313 [==============================] - 15s 47ms/step - loss: 2.4118 - accuracy: 0.4278 - val_loss: 2.3153 - val_accuracy: 0.4532
Epoch 168/300
313/313 [==============================] - 15s 47ms/step - loss: 2.4062 - accuracy: 0.4284 - val_loss: 2.3263 - val_accuracy: 0.4562
Epoch 169/300
313/313 [==============================] - 15s 47ms/step - loss: 2.3938 - accuracy: 0.4333 - val_loss: 2.3568 - val_accuracy: 0.4516
Epoch 170/300
313/313 [==============================] - 15s 47ms/step - loss: 2.3909 - accuracy: 0.4316 - val_loss: 2.3542 - val_accuracy: 0.4549
Epoch 171/300
313/313 [==============================] - 15s 47ms/step - loss: 2.3812 - accuracy: 0.4333 - val_loss: 2.3387 - val_accuracy: 0.4556
Epoch 172/300
313/313 [==============================] - 15s 47ms/step - loss: 2.3744 - accuracy: 0.4362 - val_loss: 2.4097 - val_accuracy: 0.4441
Epoch 173/300
313/313 [==============================] - 15s 47ms/step - loss: 2.3703 - accuracy: 0.4362 - val_loss: 2.3769 - val_accuracy: 0.4448
Epoch 174/300
313/313 [==============================] - 15s 47ms/step - loss: 2.3700 - accuracy: 0.4369 - val_loss: 2.3268 - val_accuracy: 0.4535
Epoch 175/300
313/313 [==============================] - 15s 47ms/step - loss: 2.3556 - accuracy: 0.4391 - val_loss: 2.3275 - val_accuracy: 0.4646
Epoch 176/300
313/313 [==============================] - 15s 47ms/step - loss: 2.3561 - accuracy: 0.4392 - val_loss: 2.3107 - val_accuracy: 0.4597
Epoch 177/300
313/313 [==============================] - 15s 47ms/step - loss: 2.3432 - accuracy: 0.4426 - val_loss: 2.3485 - val_accuracy: 0.4602
Epoch 178/300
313/313 [==============================] - 15s 47ms/step - loss: 2.3431 - accuracy: 0.4436 - val_loss: 2.2612 - val_accuracy: 0.4822
Epoch 179/300
313/313 [==============================] - 15s 47ms/step - loss: 2.3331 - accuracy: 0.4441 - val_loss: 2.3905 - val_accuracy: 0.4497
Epoch 180/300
313/313 [==============================] - 15s 47ms/step - loss: 2.3240 - accuracy: 0.4480 - val_loss: 2.3625 - val_accuracy: 0.4491
Epoch 181/300
313/313 [==============================] - 15s 47ms/step - loss: 2.3169 - accuracy: 0.4488 - val_loss: 2.2755 - val_accuracy: 0.4708
Epoch 182/300
313/313 [==============================] - 15s 47ms/step - loss: 2.3102 - accuracy: 0.4505 - val_loss: 2.3066 - val_accuracy: 0.4684
Epoch 183/300
313/313 [==============================] - 15s 47ms/step - loss: 2.3058 - accuracy: 0.4507 - val_loss: 2.2394 - val_accuracy: 0.4830
Epoch 184/300
313/313 [==============================] - 15s 47ms/step - loss: 2.2954 - accuracy: 0.4543 - val_loss: 2.2944 - val_accuracy: 0.4747
Epoch 185/300
313/313 [==============================] - 15s 47ms/step - loss: 2.2959 - accuracy: 0.4540 - val_loss: 2.2942 - val_accuracy: 0.4753
Epoch 186/300
313/313 [==============================] - 15s 47ms/step - loss: 2.2825 - accuracy: 0.4583 - val_loss: 2.3440 - val_accuracy: 0.4629
Epoch 187/300
313/313 [==============================] - 15s 47ms/step - loss: 2.2774 - accuracy: 0.4592 - val_loss: 2.3784 - val_accuracy: 0.4521
Epoch 188/300
313/313 [==============================] - 15s 47ms/step - loss: 2.2744 - accuracy: 0.4575 - val_loss: 2.2675 - val_accuracy: 0.4779
Epoch 189/300
313/313 [==============================] - 15s 47ms/step - loss: 2.2639 - accuracy: 0.4612 - val_loss: 2.2713 - val_accuracy: 0.4765
Epoch 190/300
313/313 [==============================] - 15s 48ms/step - loss: 2.2620 - accuracy: 0.4629 - val_loss: 2.3535 - val_accuracy: 0.4547
Epoch 191/300
313/313 [==============================] - 16s 51ms/step - loss: 2.2469 - accuracy: 0.4667 - val_loss: 2.2820 - val_accuracy: 0.4867
Epoch 192/300
313/313 [==============================] - 15s 47ms/step - loss: 2.2488 - accuracy: 0.4644 - val_loss: 2.3225 - val_accuracy: 0.4680
Epoch 193/300
313/313 [==============================] - 16s 50ms/step - loss: 2.2460 - accuracy: 0.4665 - val_loss: 2.3437 - val_accuracy: 0.4585
Epoch 194/300
313/313 [==============================] - 15s 49ms/step - loss: 2.2333 - accuracy: 0.4705 - val_loss: 2.2889 - val_accuracy: 0.4701
Epoch 195/300
313/313 [==============================] - 16s 50ms/step - loss: 2.2254 - accuracy: 0.4711 - val_loss: 2.3075 - val_accuracy: 0.4781
Epoch 196/300
313/313 [==============================] - 16s 50ms/step - loss: 2.2209 - accuracy: 0.4723 - val_loss: 2.2807 - val_accuracy: 0.4694
Epoch 197/300
313/313 [==============================] - 16s 50ms/step - loss: 2.2099 - accuracy: 0.4752 - val_loss: 2.3289 - val_accuracy: 0.4727
Epoch 198/300
313/313 [==============================] - 15s 49ms/step - loss: 2.2080 - accuracy: 0.4769 - val_loss: 2.2862 - val_accuracy: 0.4733
Epoch 199/300
313/313 [==============================] - 15s 49ms/step - loss: 2.2060 - accuracy: 0.4760 - val_loss: 2.3262 - val_accuracy: 0.4697
Epoch 200/300
313/313 [==============================] - 15s 49ms/step - loss: 2.2002 - accuracy: 0.4779 - val_loss: 2.3006 - val_accuracy: 0.4862
Epoch 201/300
313/313 [==============================] - 16s 50ms/step - loss: 2.1950 - accuracy: 0.4790 - val_loss: 2.3329 - val_accuracy: 0.4713
Epoch 202/300
313/313 [==============================] - 15s 50ms/step - loss: 2.1851 - accuracy: 0.4818 - val_loss: 2.2585 - val_accuracy: 0.4858
Epoch 203/300
313/313 [==============================] - 15s 49ms/step - loss: 2.1775 - accuracy: 0.4823 - val_loss: 2.3013 - val_accuracy: 0.4808
Epoch 203: early stopping
313/313 [==============================] - 2s 6ms/step
c:\Users\Admin\.conda\envs\gpu_env\lib\site-packages\sklearn\metrics\_classification.py:1334: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
--------------------------------------------------------------------------- NameError Traceback (most recent call last) Cell In [11], line 71 69 test_loss, test_acc = model.evaluate(X_test, y_test, verbose=0) 70 precision,recall,f1 = get_metrics(model) ---> 71 Model_scores = pd.concat([Model_scores,pd.DataFrame([['deep CNN elu sgd batchnorm dropout l1',test_acc,test_loss,precision,recall,f1]],columns=Model_scores.columns)],ignore_index=True) NameError: name 'Model_scores' is not defined
visualkeras.layered_view(model,legend=True).show() # display using your system viewer
model = Sequential()
# Convolutional Layer
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.25))
# Convolutional Layer
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.25))
# Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=256, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.25))
model.add(Flatten())
# model.add(Dropout(0.2))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(100, activation='softmax',kernel_regularizer=tensorflow.keras.regularizers.L2(0.01)))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
h_callback = model.fit(X_train, y_train, epochs = 300,validation_data=(X_val, y_val),batch_size=256,callbacks=[early_stopping])
# Plot train vs test loss during training
plot_loss(h_callback.history['loss'], h_callback.history['val_loss'])
plot_accuracy(h_callback.history['accuracy'], h_callback.history['val_accuracy'])
# evaluate model
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=0)
precision,recall,f1 = get_metrics(model)
# Model_scores = pd.concat([Model_scores,pd.DataFrame([['deep CNN elu sgd batchnorm dropout l1',test_acc,test_loss,precision,recall,f1]],columns=Model_scores.columns)],ignore_index=True)
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_10 (Conv2D) (None, 32, 32, 32) 896
batch_normalization_10 (Bat (None, 32, 32, 32) 128
chNormalization)
conv2d_11 (Conv2D) (None, 32, 32, 32) 9248
batch_normalization_11 (Bat (None, 32, 32, 32) 128
chNormalization)
max_pooling2d_4 (MaxPooling (None, 16, 16, 32) 0
2D)
dropout_5 (Dropout) (None, 16, 16, 32) 0
conv2d_12 (Conv2D) (None, 16, 16, 64) 18496
batch_normalization_12 (Bat (None, 16, 16, 64) 256
chNormalization)
conv2d_13 (Conv2D) (None, 16, 16, 64) 36928
batch_normalization_13 (Bat (None, 16, 16, 64) 256
chNormalization)
max_pooling2d_5 (MaxPooling (None, 8, 8, 64) 0
2D)
dropout_6 (Dropout) (None, 8, 8, 64) 0
conv2d_14 (Conv2D) (None, 8, 8, 128) 73856
batch_normalization_14 (Bat (None, 8, 8, 128) 512
chNormalization)
conv2d_15 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_15 (Bat (None, 8, 8, 128) 512
chNormalization)
conv2d_16 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_16 (Bat (None, 8, 8, 128) 512
chNormalization)
conv2d_17 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_17 (Bat (None, 8, 8, 128) 512
chNormalization)
max_pooling2d_6 (MaxPooling (None, 4, 4, 128) 0
2D)
dropout_7 (Dropout) (None, 4, 4, 128) 0
conv2d_18 (Conv2D) (None, 4, 4, 256) 295168
batch_normalization_18 (Bat (None, 4, 4, 256) 1024
chNormalization)
conv2d_19 (Conv2D) (None, 4, 4, 256) 590080
batch_normalization_19 (Bat (None, 4, 4, 256) 1024
chNormalization)
max_pooling2d_7 (MaxPooling (None, 2, 2, 256) 0
2D)
dropout_8 (Dropout) (None, 2, 2, 256) 0
flatten_1 (Flatten) (None, 1024) 0
dense_2 (Dense) (None, 128) 131200
dropout_9 (Dropout) (None, 128) 0
dense_3 (Dense) (None, 100) 12900
=================================================================
Total params: 1,616,388
Trainable params: 1,613,956
Non-trainable params: 2,432
_________________________________________________________________
Epoch 1/300
313/313 [==============================] - 17s 49ms/step - loss: 5.6354 - accuracy: 0.0218 - val_loss: 5.5488 - val_accuracy: 0.0190
Epoch 2/300
313/313 [==============================] - 14s 46ms/step - loss: 5.2900 - accuracy: 0.0474 - val_loss: 5.0089 - val_accuracy: 0.0811
Epoch 3/300
313/313 [==============================] - 15s 46ms/step - loss: 4.9974 - accuracy: 0.0722 - val_loss: 4.7498 - val_accuracy: 0.0972
Epoch 4/300
313/313 [==============================] - 14s 46ms/step - loss: 4.7409 - accuracy: 0.0881 - val_loss: 4.6278 - val_accuracy: 0.1007
Epoch 5/300
313/313 [==============================] - 15s 47ms/step - loss: 4.5324 - accuracy: 0.1007 - val_loss: 4.3170 - val_accuracy: 0.1372
Epoch 6/300
313/313 [==============================] - 15s 47ms/step - loss: 4.3584 - accuracy: 0.1138 - val_loss: 4.2056 - val_accuracy: 0.1427
Epoch 7/300
313/313 [==============================] - 15s 47ms/step - loss: 4.2071 - accuracy: 0.1280 - val_loss: 4.2284 - val_accuracy: 0.1259
Epoch 8/300
313/313 [==============================] - 13s 43ms/step - loss: 4.0785 - accuracy: 0.1362 - val_loss: 4.1069 - val_accuracy: 0.1395
Epoch 9/300
313/313 [==============================] - 13s 41ms/step - loss: 3.9552 - accuracy: 0.1488 - val_loss: 4.0575 - val_accuracy: 0.1358
Epoch 10/300
313/313 [==============================] - 13s 41ms/step - loss: 3.8498 - accuracy: 0.1593 - val_loss: 3.9034 - val_accuracy: 0.1518
Epoch 11/300
313/313 [==============================] - 13s 41ms/step - loss: 3.7569 - accuracy: 0.1691 - val_loss: 3.7576 - val_accuracy: 0.1745
Epoch 12/300
313/313 [==============================] - 13s 41ms/step - loss: 3.6651 - accuracy: 0.1774 - val_loss: 3.5777 - val_accuracy: 0.1921
Epoch 13/300
313/313 [==============================] - 13s 41ms/step - loss: 3.5884 - accuracy: 0.1858 - val_loss: 3.7146 - val_accuracy: 0.1799
Epoch 14/300
313/313 [==============================] - 13s 41ms/step - loss: 3.5198 - accuracy: 0.1938 - val_loss: 3.4199 - val_accuracy: 0.2121
Epoch 15/300
313/313 [==============================] - 13s 41ms/step - loss: 3.4536 - accuracy: 0.1999 - val_loss: 3.3711 - val_accuracy: 0.2224
Epoch 16/300
313/313 [==============================] - 13s 41ms/step - loss: 3.3944 - accuracy: 0.2087 - val_loss: 3.3469 - val_accuracy: 0.2242
Epoch 17/300
313/313 [==============================] - 13s 41ms/step - loss: 3.3454 - accuracy: 0.2148 - val_loss: 3.2274 - val_accuracy: 0.2424
Epoch 18/300
313/313 [==============================] - 13s 41ms/step - loss: 3.2953 - accuracy: 0.2224 - val_loss: 3.4343 - val_accuracy: 0.2080
Epoch 19/300
313/313 [==============================] - 13s 41ms/step - loss: 3.2515 - accuracy: 0.2289 - val_loss: 3.4543 - val_accuracy: 0.1948
Epoch 20/300
313/313 [==============================] - 13s 41ms/step - loss: 3.2110 - accuracy: 0.2347 - val_loss: 3.1982 - val_accuracy: 0.2389
Epoch 21/300
313/313 [==============================] - 13s 41ms/step - loss: 3.1675 - accuracy: 0.2409 - val_loss: 3.2450 - val_accuracy: 0.2325
Epoch 22/300
313/313 [==============================] - 13s 41ms/step - loss: 3.1374 - accuracy: 0.2456 - val_loss: 3.1773 - val_accuracy: 0.2427
Epoch 23/300
313/313 [==============================] - 13s 41ms/step - loss: 3.0968 - accuracy: 0.2528 - val_loss: 2.9455 - val_accuracy: 0.2842
Epoch 24/300
313/313 [==============================] - 13s 41ms/step - loss: 3.0644 - accuracy: 0.2567 - val_loss: 2.9045 - val_accuracy: 0.2908
Epoch 25/300
313/313 [==============================] - 13s 41ms/step - loss: 3.0303 - accuracy: 0.2630 - val_loss: 3.0087 - val_accuracy: 0.2711
Epoch 26/300
313/313 [==============================] - 13s 41ms/step - loss: 2.9992 - accuracy: 0.2688 - val_loss: 3.1436 - val_accuracy: 0.2571
Epoch 27/300
313/313 [==============================] - 13s 41ms/step - loss: 2.9707 - accuracy: 0.2716 - val_loss: 2.9468 - val_accuracy: 0.2783
Epoch 28/300
313/313 [==============================] - 13s 41ms/step - loss: 2.9463 - accuracy: 0.2757 - val_loss: 2.7800 - val_accuracy: 0.3061
Epoch 29/300
313/313 [==============================] - 13s 41ms/step - loss: 2.9246 - accuracy: 0.2802 - val_loss: 2.7551 - val_accuracy: 0.3121
Epoch 30/300
313/313 [==============================] - 13s 41ms/step - loss: 2.8920 - accuracy: 0.2865 - val_loss: 2.8629 - val_accuracy: 0.2957
Epoch 31/300
313/313 [==============================] - 13s 41ms/step - loss: 2.8697 - accuracy: 0.2902 - val_loss: 2.7803 - val_accuracy: 0.3073
Epoch 32/300
313/313 [==============================] - 13s 41ms/step - loss: 2.8459 - accuracy: 0.2942 - val_loss: 2.7320 - val_accuracy: 0.3188
Epoch 33/300
313/313 [==============================] - 13s 41ms/step - loss: 2.8270 - accuracy: 0.2963 - val_loss: 2.6519 - val_accuracy: 0.3337
Epoch 34/300
313/313 [==============================] - 13s 41ms/step - loss: 2.8042 - accuracy: 0.3002 - val_loss: 2.7260 - val_accuracy: 0.3234
Epoch 35/300
313/313 [==============================] - 13s 41ms/step - loss: 2.7773 - accuracy: 0.3080 - val_loss: 2.7398 - val_accuracy: 0.3235
Epoch 36/300
313/313 [==============================] - 13s 41ms/step - loss: 2.7641 - accuracy: 0.3099 - val_loss: 2.6275 - val_accuracy: 0.3403
Epoch 37/300
313/313 [==============================] - 13s 41ms/step - loss: 2.7445 - accuracy: 0.3105 - val_loss: 2.5358 - val_accuracy: 0.3565
Epoch 38/300
313/313 [==============================] - 13s 41ms/step - loss: 2.7220 - accuracy: 0.3169 - val_loss: 2.5178 - val_accuracy: 0.3591
Epoch 39/300
313/313 [==============================] - 13s 41ms/step - loss: 2.7106 - accuracy: 0.3207 - val_loss: 2.5134 - val_accuracy: 0.3586
Epoch 40/300
313/313 [==============================] - 13s 41ms/step - loss: 2.6929 - accuracy: 0.3235 - val_loss: 2.4956 - val_accuracy: 0.3693
Epoch 41/300
313/313 [==============================] - 13s 41ms/step - loss: 2.6730 - accuracy: 0.3257 - val_loss: 2.5382 - val_accuracy: 0.3578
Epoch 42/300
313/313 [==============================] - 13s 41ms/step - loss: 2.6516 - accuracy: 0.3298 - val_loss: 2.4511 - val_accuracy: 0.3720
Epoch 43/300
313/313 [==============================] - 13s 41ms/step - loss: 2.6419 - accuracy: 0.3317 - val_loss: 2.4427 - val_accuracy: 0.3810
Epoch 44/300
313/313 [==============================] - 13s 41ms/step - loss: 2.6220 - accuracy: 0.3377 - val_loss: 2.4070 - val_accuracy: 0.3837
Epoch 45/300
313/313 [==============================] - 13s 41ms/step - loss: 2.6085 - accuracy: 0.3406 - val_loss: 2.4378 - val_accuracy: 0.3790
Epoch 46/300
313/313 [==============================] - 13s 41ms/step - loss: 2.5948 - accuracy: 0.3429 - val_loss: 2.3947 - val_accuracy: 0.3898
Epoch 47/300
313/313 [==============================] - 13s 41ms/step - loss: 2.5796 - accuracy: 0.3477 - val_loss: 2.3154 - val_accuracy: 0.4057
Epoch 48/300
313/313 [==============================] - 13s 41ms/step - loss: 2.5661 - accuracy: 0.3468 - val_loss: 2.3320 - val_accuracy: 0.4009
Epoch 49/300
313/313 [==============================] - 13s 41ms/step - loss: 2.5503 - accuracy: 0.3488 - val_loss: 2.3816 - val_accuracy: 0.3932
Epoch 50/300
313/313 [==============================] - 13s 41ms/step - loss: 2.5340 - accuracy: 0.3536 - val_loss: 2.3537 - val_accuracy: 0.3979
Epoch 51/300
313/313 [==============================] - 13s 41ms/step - loss: 2.5178 - accuracy: 0.3591 - val_loss: 2.2927 - val_accuracy: 0.4083
Epoch 52/300
313/313 [==============================] - 13s 41ms/step - loss: 2.5068 - accuracy: 0.3595 - val_loss: 2.3012 - val_accuracy: 0.4120
Epoch 53/300
313/313 [==============================] - 13s 41ms/step - loss: 2.4932 - accuracy: 0.3624 - val_loss: 2.2692 - val_accuracy: 0.4145
Epoch 54/300
313/313 [==============================] - 13s 41ms/step - loss: 2.4800 - accuracy: 0.3663 - val_loss: 2.3027 - val_accuracy: 0.4100
Epoch 55/300
313/313 [==============================] - 13s 41ms/step - loss: 2.4694 - accuracy: 0.3683 - val_loss: 2.2611 - val_accuracy: 0.4203
Epoch 56/300
313/313 [==============================] - 13s 41ms/step - loss: 2.4574 - accuracy: 0.3702 - val_loss: 2.2521 - val_accuracy: 0.4180
Epoch 57/300
313/313 [==============================] - 13s 41ms/step - loss: 2.4456 - accuracy: 0.3722 - val_loss: 2.3314 - val_accuracy: 0.4057
Epoch 58/300
313/313 [==============================] - 13s 41ms/step - loss: 2.4320 - accuracy: 0.3767 - val_loss: 2.2776 - val_accuracy: 0.4157
Epoch 59/300
313/313 [==============================] - 13s 41ms/step - loss: 2.4150 - accuracy: 0.3779 - val_loss: 2.2546 - val_accuracy: 0.4250
Epoch 60/300
313/313 [==============================] - 13s 41ms/step - loss: 2.4118 - accuracy: 0.3799 - val_loss: 2.2210 - val_accuracy: 0.4243
Epoch 61/300
313/313 [==============================] - 13s 41ms/step - loss: 2.3975 - accuracy: 0.3842 - val_loss: 2.2007 - val_accuracy: 0.4305
Epoch 62/300
313/313 [==============================] - 13s 41ms/step - loss: 2.3843 - accuracy: 0.3856 - val_loss: 2.1887 - val_accuracy: 0.4349
Epoch 63/300
313/313 [==============================] - 13s 41ms/step - loss: 2.3764 - accuracy: 0.3882 - val_loss: 2.2966 - val_accuracy: 0.4122
Epoch 64/300
313/313 [==============================] - 13s 41ms/step - loss: 2.3643 - accuracy: 0.3916 - val_loss: 2.2566 - val_accuracy: 0.4233
Epoch 65/300
313/313 [==============================] - 13s 41ms/step - loss: 2.3561 - accuracy: 0.3929 - val_loss: 2.2630 - val_accuracy: 0.4217
Epoch 66/300
313/313 [==============================] - 13s 41ms/step - loss: 2.3398 - accuracy: 0.3965 - val_loss: 2.1368 - val_accuracy: 0.4482
Epoch 67/300
313/313 [==============================] - 13s 41ms/step - loss: 2.3300 - accuracy: 0.3979 - val_loss: 2.1487 - val_accuracy: 0.4442
Epoch 68/300
313/313 [==============================] - 13s 41ms/step - loss: 2.3243 - accuracy: 0.3979 - val_loss: 2.2108 - val_accuracy: 0.4295
Epoch 69/300
313/313 [==============================] - 13s 41ms/step - loss: 2.3171 - accuracy: 0.4007 - val_loss: 2.1052 - val_accuracy: 0.4503
Epoch 70/300
313/313 [==============================] - 13s 41ms/step - loss: 2.2975 - accuracy: 0.4052 - val_loss: 2.2016 - val_accuracy: 0.4275
Epoch 71/300
313/313 [==============================] - 13s 41ms/step - loss: 2.2893 - accuracy: 0.4063 - val_loss: 2.0766 - val_accuracy: 0.4588
Epoch 72/300
313/313 [==============================] - 13s 41ms/step - loss: 2.2811 - accuracy: 0.4065 - val_loss: 2.1478 - val_accuracy: 0.4464
Epoch 73/300
313/313 [==============================] - 13s 41ms/step - loss: 2.2733 - accuracy: 0.4112 - val_loss: 2.0806 - val_accuracy: 0.4569
Epoch 74/300
313/313 [==============================] - 13s 41ms/step - loss: 2.2600 - accuracy: 0.4128 - val_loss: 2.1451 - val_accuracy: 0.4456
Epoch 75/300
313/313 [==============================] - 13s 41ms/step - loss: 2.2483 - accuracy: 0.4150 - val_loss: 2.0860 - val_accuracy: 0.4593
Epoch 76/300
313/313 [==============================] - 13s 41ms/step - loss: 2.2407 - accuracy: 0.4168 - val_loss: 2.0877 - val_accuracy: 0.4631
Epoch 77/300
313/313 [==============================] - 13s 41ms/step - loss: 2.2294 - accuracy: 0.4198 - val_loss: 2.1046 - val_accuracy: 0.4562
Epoch 78/300
313/313 [==============================] - 13s 41ms/step - loss: 2.2198 - accuracy: 0.4204 - val_loss: 2.0974 - val_accuracy: 0.4609
Epoch 79/300
313/313 [==============================] - 13s 41ms/step - loss: 2.2014 - accuracy: 0.4255 - val_loss: 2.1011 - val_accuracy: 0.4545
Epoch 80/300
313/313 [==============================] - 13s 41ms/step - loss: 2.2010 - accuracy: 0.4279 - val_loss: 2.0965 - val_accuracy: 0.4586
Epoch 81/300
313/313 [==============================] - 13s 41ms/step - loss: 2.1880 - accuracy: 0.4304 - val_loss: 2.0521 - val_accuracy: 0.4661
Epoch 82/300
313/313 [==============================] - 13s 41ms/step - loss: 2.1796 - accuracy: 0.4317 - val_loss: 2.1503 - val_accuracy: 0.4436
Epoch 83/300
313/313 [==============================] - 13s 41ms/step - loss: 2.1774 - accuracy: 0.4316 - val_loss: 2.0011 - val_accuracy: 0.4799
Epoch 84/300
313/313 [==============================] - 13s 41ms/step - loss: 2.1655 - accuracy: 0.4349 - val_loss: 2.0412 - val_accuracy: 0.4709
Epoch 85/300
313/313 [==============================] - 13s 41ms/step - loss: 2.1569 - accuracy: 0.4356 - val_loss: 2.0481 - val_accuracy: 0.4711
Epoch 86/300
313/313 [==============================] - 13s 41ms/step - loss: 2.1444 - accuracy: 0.4387 - val_loss: 2.0086 - val_accuracy: 0.4773
Epoch 87/300
313/313 [==============================] - 13s 41ms/step - loss: 2.1339 - accuracy: 0.4395 - val_loss: 2.0061 - val_accuracy: 0.4807
Epoch 88/300
313/313 [==============================] - 13s 41ms/step - loss: 2.1270 - accuracy: 0.4424 - val_loss: 2.0407 - val_accuracy: 0.4686
Epoch 89/300
313/313 [==============================] - 13s 41ms/step - loss: 2.1141 - accuracy: 0.4464 - val_loss: 2.0207 - val_accuracy: 0.4762
Epoch 90/300
313/313 [==============================] - 13s 41ms/step - loss: 2.1088 - accuracy: 0.4434 - val_loss: 2.0087 - val_accuracy: 0.4764
Epoch 91/300
313/313 [==============================] - 13s 41ms/step - loss: 2.0960 - accuracy: 0.4476 - val_loss: 2.0128 - val_accuracy: 0.4758
Epoch 92/300
313/313 [==============================] - 13s 41ms/step - loss: 2.0868 - accuracy: 0.4505 - val_loss: 1.9665 - val_accuracy: 0.4878
Epoch 93/300
313/313 [==============================] - 13s 41ms/step - loss: 2.0820 - accuracy: 0.4528 - val_loss: 2.0149 - val_accuracy: 0.4836
Epoch 94/300
313/313 [==============================] - 13s 41ms/step - loss: 2.0791 - accuracy: 0.4530 - val_loss: 1.9948 - val_accuracy: 0.4798
Epoch 95/300
313/313 [==============================] - 13s 41ms/step - loss: 2.0600 - accuracy: 0.4566 - val_loss: 1.9768 - val_accuracy: 0.4810
Epoch 96/300
313/313 [==============================] - 13s 41ms/step - loss: 2.0580 - accuracy: 0.4568 - val_loss: 1.9901 - val_accuracy: 0.4824
Epoch 97/300
313/313 [==============================] - 13s 41ms/step - loss: 2.0494 - accuracy: 0.4621 - val_loss: 1.9214 - val_accuracy: 0.4928
Epoch 98/300
313/313 [==============================] - 13s 41ms/step - loss: 2.0377 - accuracy: 0.4627 - val_loss: 1.9541 - val_accuracy: 0.4891
Epoch 99/300
313/313 [==============================] - 13s 41ms/step - loss: 2.0309 - accuracy: 0.4637 - val_loss: 1.9366 - val_accuracy: 0.4934
Epoch 100/300
313/313 [==============================] - 13s 41ms/step - loss: 2.0200 - accuracy: 0.4667 - val_loss: 2.0001 - val_accuracy: 0.4821
Epoch 101/300
313/313 [==============================] - 13s 41ms/step - loss: 2.0126 - accuracy: 0.4690 - val_loss: 2.0251 - val_accuracy: 0.4737
Epoch 102/300
313/313 [==============================] - 13s 41ms/step - loss: 2.0016 - accuracy: 0.4711 - val_loss: 1.9767 - val_accuracy: 0.4840
Epoch 103/300
313/313 [==============================] - 13s 41ms/step - loss: 1.9954 - accuracy: 0.4721 - val_loss: 1.9456 - val_accuracy: 0.4892
Epoch 104/300
313/313 [==============================] - 13s 41ms/step - loss: 1.9855 - accuracy: 0.4746 - val_loss: 1.9490 - val_accuracy: 0.4902
Epoch 105/300
313/313 [==============================] - 13s 41ms/step - loss: 1.9811 - accuracy: 0.4750 - val_loss: 1.9663 - val_accuracy: 0.4897
Epoch 106/300
313/313 [==============================] - 13s 41ms/step - loss: 1.9714 - accuracy: 0.4758 - val_loss: 1.9592 - val_accuracy: 0.4916
Epoch 107/300
313/313 [==============================] - 13s 41ms/step - loss: 1.9588 - accuracy: 0.4799 - val_loss: 1.9505 - val_accuracy: 0.4964
Epoch 108/300
313/313 [==============================] - 13s 41ms/step - loss: 1.9544 - accuracy: 0.4812 - val_loss: 1.9380 - val_accuracy: 0.4981
Epoch 109/300
313/313 [==============================] - 13s 41ms/step - loss: 1.9475 - accuracy: 0.4833 - val_loss: 1.8973 - val_accuracy: 0.5080
Epoch 110/300
313/313 [==============================] - 13s 41ms/step - loss: 1.9350 - accuracy: 0.4850 - val_loss: 1.9264 - val_accuracy: 0.4974
Epoch 111/300
313/313 [==============================] - 13s 41ms/step - loss: 1.9283 - accuracy: 0.4861 - val_loss: 1.9600 - val_accuracy: 0.4971
Epoch 112/300
313/313 [==============================] - 13s 41ms/step - loss: 1.9266 - accuracy: 0.4877 - val_loss: 1.9048 - val_accuracy: 0.5036
Epoch 113/300
313/313 [==============================] - 13s 41ms/step - loss: 1.9132 - accuracy: 0.4908 - val_loss: 1.9407 - val_accuracy: 0.5028
Epoch 114/300
313/313 [==============================] - 13s 41ms/step - loss: 1.9082 - accuracy: 0.4914 - val_loss: 1.9003 - val_accuracy: 0.5087
Epoch 115/300
313/313 [==============================] - 13s 41ms/step - loss: 1.8972 - accuracy: 0.4963 - val_loss: 1.9499 - val_accuracy: 0.4921
Epoch 116/300
313/313 [==============================] - 13s 41ms/step - loss: 1.8866 - accuracy: 0.4963 - val_loss: 1.8889 - val_accuracy: 0.5090
Epoch 117/300
313/313 [==============================] - 13s 41ms/step - loss: 1.8816 - accuracy: 0.4979 - val_loss: 1.9053 - val_accuracy: 0.5093
Epoch 118/300
313/313 [==============================] - 13s 41ms/step - loss: 1.8791 - accuracy: 0.4993 - val_loss: 1.9096 - val_accuracy: 0.5050
Epoch 119/300
313/313 [==============================] - 13s 41ms/step - loss: 1.8630 - accuracy: 0.5039 - val_loss: 1.9188 - val_accuracy: 0.5026
Epoch 120/300
313/313 [==============================] - 13s 41ms/step - loss: 1.8550 - accuracy: 0.5039 - val_loss: 2.0579 - val_accuracy: 0.4888
Epoch 121/300
313/313 [==============================] - 13s 41ms/step - loss: 1.8484 - accuracy: 0.5046 - val_loss: 1.9351 - val_accuracy: 0.5021
Epoch 122/300
313/313 [==============================] - 13s 41ms/step - loss: 1.8435 - accuracy: 0.5072 - val_loss: 1.8848 - val_accuracy: 0.5086
Epoch 123/300
313/313 [==============================] - 13s 41ms/step - loss: 1.8314 - accuracy: 0.5088 - val_loss: 1.8882 - val_accuracy: 0.5103
Epoch 124/300
313/313 [==============================] - 13s 41ms/step - loss: 1.8292 - accuracy: 0.5124 - val_loss: 1.9483 - val_accuracy: 0.4995
Epoch 125/300
313/313 [==============================] - 13s 41ms/step - loss: 1.8167 - accuracy: 0.5124 - val_loss: 1.8841 - val_accuracy: 0.5101
Epoch 126/300
313/313 [==============================] - 13s 41ms/step - loss: 1.8120 - accuracy: 0.5138 - val_loss: 1.9367 - val_accuracy: 0.4961
Epoch 127/300
313/313 [==============================] - 13s 41ms/step - loss: 1.8022 - accuracy: 0.5146 - val_loss: 1.8814 - val_accuracy: 0.5131
Epoch 128/300
313/313 [==============================] - 13s 41ms/step - loss: 1.7935 - accuracy: 0.5186 - val_loss: 1.9389 - val_accuracy: 0.5085
Epoch 129/300
313/313 [==============================] - 13s 41ms/step - loss: 1.7923 - accuracy: 0.5191 - val_loss: 1.9540 - val_accuracy: 0.4991
Epoch 130/300
313/313 [==============================] - 13s 41ms/step - loss: 1.7768 - accuracy: 0.5240 - val_loss: 1.9295 - val_accuracy: 0.5045
Epoch 131/300
313/313 [==============================] - 13s 41ms/step - loss: 1.7712 - accuracy: 0.5219 - val_loss: 1.8715 - val_accuracy: 0.5248
Epoch 132/300
313/313 [==============================] - 13s 41ms/step - loss: 1.7637 - accuracy: 0.5254 - val_loss: 1.9002 - val_accuracy: 0.5155
Epoch 133/300
313/313 [==============================] - 13s 41ms/step - loss: 1.7569 - accuracy: 0.5257 - val_loss: 1.8880 - val_accuracy: 0.5154
Epoch 134/300
313/313 [==============================] - 13s 41ms/step - loss: 1.7453 - accuracy: 0.5293 - val_loss: 1.9432 - val_accuracy: 0.5047
Epoch 135/300
313/313 [==============================] - 13s 41ms/step - loss: 1.7420 - accuracy: 0.5326 - val_loss: 1.8820 - val_accuracy: 0.5161
Epoch 136/300
313/313 [==============================] - 13s 41ms/step - loss: 1.7375 - accuracy: 0.5317 - val_loss: 1.8834 - val_accuracy: 0.5187
Epoch 137/300
313/313 [==============================] - 13s 41ms/step - loss: 1.7237 - accuracy: 0.5320 - val_loss: 1.8616 - val_accuracy: 0.5198
Epoch 138/300
313/313 [==============================] - 13s 41ms/step - loss: 1.7194 - accuracy: 0.5377 - val_loss: 1.8569 - val_accuracy: 0.5231
Epoch 139/300
313/313 [==============================] - 13s 41ms/step - loss: 1.7097 - accuracy: 0.5393 - val_loss: 1.9154 - val_accuracy: 0.5176
Epoch 140/300
313/313 [==============================] - 13s 41ms/step - loss: 1.7007 - accuracy: 0.5362 - val_loss: 1.8557 - val_accuracy: 0.5268
Epoch 141/300
313/313 [==============================] - 13s 41ms/step - loss: 1.6958 - accuracy: 0.5417 - val_loss: 1.8801 - val_accuracy: 0.5195
Epoch 142/300
313/313 [==============================] - 13s 41ms/step - loss: 1.6914 - accuracy: 0.5425 - val_loss: 1.8480 - val_accuracy: 0.5274
Epoch 143/300
313/313 [==============================] - 13s 41ms/step - loss: 1.6855 - accuracy: 0.5444 - val_loss: 1.8782 - val_accuracy: 0.5186
Epoch 144/300
313/313 [==============================] - 13s 41ms/step - loss: 1.6721 - accuracy: 0.5460 - val_loss: 1.8633 - val_accuracy: 0.5248
Epoch 145/300
313/313 [==============================] - 13s 41ms/step - loss: 1.6657 - accuracy: 0.5487 - val_loss: 1.8514 - val_accuracy: 0.5327
Epoch 146/300
313/313 [==============================] - 13s 41ms/step - loss: 1.6630 - accuracy: 0.5488 - val_loss: 1.8742 - val_accuracy: 0.5234
Epoch 147/300
313/313 [==============================] - 13s 41ms/step - loss: 1.6506 - accuracy: 0.5523 - val_loss: 1.8639 - val_accuracy: 0.5246
Epoch 148/300
313/313 [==============================] - 13s 41ms/step - loss: 1.6433 - accuracy: 0.5547 - val_loss: 1.8434 - val_accuracy: 0.5332
Epoch 149/300
313/313 [==============================] - 13s 41ms/step - loss: 1.6383 - accuracy: 0.5561 - val_loss: 1.8293 - val_accuracy: 0.5270
Epoch 150/300
313/313 [==============================] - 13s 41ms/step - loss: 1.6294 - accuracy: 0.5583 - val_loss: 1.8568 - val_accuracy: 0.5223
Epoch 151/300
313/313 [==============================] - 13s 41ms/step - loss: 1.6204 - accuracy: 0.5593 - val_loss: 1.8763 - val_accuracy: 0.5274
Epoch 152/300
313/313 [==============================] - 13s 41ms/step - loss: 1.6155 - accuracy: 0.5607 - val_loss: 1.9046 - val_accuracy: 0.5220
Epoch 153/300
313/313 [==============================] - 13s 41ms/step - loss: 1.6047 - accuracy: 0.5638 - val_loss: 1.8406 - val_accuracy: 0.5318
Epoch 154/300
313/313 [==============================] - 13s 41ms/step - loss: 1.5970 - accuracy: 0.5641 - val_loss: 1.8509 - val_accuracy: 0.5326
Epoch 155/300
313/313 [==============================] - 13s 41ms/step - loss: 1.5996 - accuracy: 0.5649 - val_loss: 1.8367 - val_accuracy: 0.5286
Epoch 156/300
313/313 [==============================] - 13s 41ms/step - loss: 1.5850 - accuracy: 0.5671 - val_loss: 1.8869 - val_accuracy: 0.5226
Epoch 157/300
313/313 [==============================] - 13s 41ms/step - loss: 1.5784 - accuracy: 0.5699 - val_loss: 1.9306 - val_accuracy: 0.5166
Epoch 158/300
313/313 [==============================] - 13s 41ms/step - loss: 1.5692 - accuracy: 0.5707 - val_loss: 1.9302 - val_accuracy: 0.5194
Epoch 159/300
313/313 [==============================] - 13s 41ms/step - loss: 1.5699 - accuracy: 0.5716 - val_loss: 1.8805 - val_accuracy: 0.5237
Epoch 160/300
313/313 [==============================] - 13s 41ms/step - loss: 1.5534 - accuracy: 0.5752 - val_loss: 1.8412 - val_accuracy: 0.5333
Epoch 161/300
313/313 [==============================] - 13s 41ms/step - loss: 1.5501 - accuracy: 0.5771 - val_loss: 1.8483 - val_accuracy: 0.5275
Epoch 162/300
313/313 [==============================] - 13s 41ms/step - loss: 1.5442 - accuracy: 0.5780 - val_loss: 1.8753 - val_accuracy: 0.5296
Epoch 163/300
313/313 [==============================] - 13s 41ms/step - loss: 1.5370 - accuracy: 0.5788 - val_loss: 1.8506 - val_accuracy: 0.5303
Epoch 164/300
313/313 [==============================] - 13s 41ms/step - loss: 1.5296 - accuracy: 0.5805 - val_loss: 1.8778 - val_accuracy: 0.5241
Epoch 165/300
313/313 [==============================] - 13s 41ms/step - loss: 1.5220 - accuracy: 0.5822 - val_loss: 1.8603 - val_accuracy: 0.5316
Epoch 166/300
313/313 [==============================] - 13s 41ms/step - loss: 1.5196 - accuracy: 0.5856 - val_loss: 1.8728 - val_accuracy: 0.5324
Epoch 167/300
313/313 [==============================] - 13s 41ms/step - loss: 1.5041 - accuracy: 0.5866 - val_loss: 1.8465 - val_accuracy: 0.5348
Epoch 168/300
313/313 [==============================] - 13s 41ms/step - loss: 1.5012 - accuracy: 0.5894 - val_loss: 1.9091 - val_accuracy: 0.5262
Epoch 169/300
313/313 [==============================] - 13s 41ms/step - loss: 1.4946 - accuracy: 0.5894 - val_loss: 1.9140 - val_accuracy: 0.5209
Epoch 169: early stopping
313/313 [==============================] - 2s 5ms/step
visualkeras.layered_view(model)
del model
model = Sequential()
# Convolutional Layer
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.25))
# Convolutional Layer
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.25))
# Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=256, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.25))
model.add(Flatten())
# model.add(Dropout(0.2))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(100, activation='softmax',kernel_regularizer=tensorflow.keras.regularizers.L1L2(l1=0.01,l2=0.01)))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
h_callback = model.fit(X_train, y_train, epochs = 300,validation_data=(X_val, y_val),batch_size=256,callbacks=[early_stopping])
# Plot train vs test loss during training
plot_loss(h_callback.history['loss'], h_callback.history['val_loss'])
plot_accuracy(h_callback.history['accuracy'], h_callback.history['val_accuracy'])
# evaluate model
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=0)
precision,recall,f1 = get_metrics(model)
Model_scores = pd.concat([Model_scores,pd.DataFrame([['deep CNN elu sgd batchnorm dropout l1l2',test_acc,test_loss,precision,recall,f1]],columns=Model_scores.columns)],ignore_index=True)
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 32, 32, 32) 896
batch_normalization (BatchN (None, 32, 32, 32) 128
ormalization)
conv2d_1 (Conv2D) (None, 32, 32, 32) 9248
batch_normalization_1 (Batc (None, 32, 32, 32) 128
hNormalization)
max_pooling2d (MaxPooling2D (None, 16, 16, 32) 0
)
dropout (Dropout) (None, 16, 16, 32) 0
conv2d_2 (Conv2D) (None, 16, 16, 64) 18496
batch_normalization_2 (Batc (None, 16, 16, 64) 256
hNormalization)
conv2d_3 (Conv2D) (None, 16, 16, 64) 36928
batch_normalization_3 (Batc (None, 16, 16, 64) 256
hNormalization)
max_pooling2d_1 (MaxPooling (None, 8, 8, 64) 0
2D)
dropout_1 (Dropout) (None, 8, 8, 64) 0
conv2d_4 (Conv2D) (None, 8, 8, 128) 73856
batch_normalization_4 (Batc (None, 8, 8, 128) 512
hNormalization)
conv2d_5 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_5 (Batc (None, 8, 8, 128) 512
hNormalization)
conv2d_6 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_6 (Batc (None, 8, 8, 128) 512
hNormalization)
conv2d_7 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_7 (Batc (None, 8, 8, 128) 512
hNormalization)
max_pooling2d_2 (MaxPooling (None, 4, 4, 128) 0
2D)
dropout_2 (Dropout) (None, 4, 4, 128) 0
conv2d_8 (Conv2D) (None, 4, 4, 256) 295168
batch_normalization_8 (Batc (None, 4, 4, 256) 1024
hNormalization)
conv2d_9 (Conv2D) (None, 4, 4, 256) 590080
batch_normalization_9 (Batc (None, 4, 4, 256) 1024
hNormalization)
max_pooling2d_3 (MaxPooling (None, 2, 2, 256) 0
2D)
dropout_3 (Dropout) (None, 2, 2, 256) 0
flatten (Flatten) (None, 1024) 0
dense (Dense) (None, 128) 131200
dropout_4 (Dropout) (None, 128) 0
dense_1 (Dense) (None, 100) 12900
=================================================================
Total params: 1,616,388
Trainable params: 1,613,956
Non-trainable params: 2,432
_________________________________________________________________
Epoch 1/300
313/313 [==============================] - 17s 48ms/step - loss: 13.6125 - accuracy: 0.0274 - val_loss: 11.3243 - val_accuracy: 0.0235
Epoch 2/300
313/313 [==============================] - 14s 44ms/step - loss: 9.2480 - accuracy: 0.0531 - val_loss: 7.4644 - val_accuracy: 0.0787
Epoch 3/300
313/313 [==============================] - 14s 45ms/step - loss: 6.3052 - accuracy: 0.0758 - val_loss: 5.3510 - val_accuracy: 0.0842
Epoch 4/300
313/313 [==============================] - 14s 45ms/step - loss: 4.7575 - accuracy: 0.0846 - val_loss: 4.4245 - val_accuracy: 0.0824
Epoch 5/300
313/313 [==============================] - 15s 47ms/step - loss: 4.2821 - accuracy: 0.0874 - val_loss: 4.1853 - val_accuracy: 0.1007
Epoch 6/300
313/313 [==============================] - 15s 47ms/step - loss: 4.2004 - accuracy: 0.0901 - val_loss: 4.1539 - val_accuracy: 0.1113
Epoch 7/300
313/313 [==============================] - 15s 46ms/step - loss: 4.1580 - accuracy: 0.0916 - val_loss: 4.2294 - val_accuracy: 0.0836
Epoch 8/300
313/313 [==============================] - 14s 46ms/step - loss: 4.1259 - accuracy: 0.0972 - val_loss: 4.1533 - val_accuracy: 0.0950
Epoch 9/300
313/313 [==============================] - 14s 46ms/step - loss: 4.0981 - accuracy: 0.0996 - val_loss: 4.2157 - val_accuracy: 0.0835
Epoch 10/300
313/313 [==============================] - 14s 45ms/step - loss: 4.0695 - accuracy: 0.1023 - val_loss: 4.1462 - val_accuracy: 0.0972
Epoch 11/300
313/313 [==============================] - 14s 45ms/step - loss: 4.0448 - accuracy: 0.1071 - val_loss: 4.1519 - val_accuracy: 0.0971
Epoch 12/300
313/313 [==============================] - 14s 45ms/step - loss: 4.0214 - accuracy: 0.1102 - val_loss: 4.1067 - val_accuracy: 0.0997
Epoch 13/300
313/313 [==============================] - 14s 45ms/step - loss: 4.0018 - accuracy: 0.1120 - val_loss: 4.0789 - val_accuracy: 0.1135
Epoch 14/300
313/313 [==============================] - 14s 45ms/step - loss: 3.9746 - accuracy: 0.1170 - val_loss: 4.0998 - val_accuracy: 0.1082
Epoch 15/300
313/313 [==============================] - 14s 46ms/step - loss: 3.9536 - accuracy: 0.1190 - val_loss: 3.9454 - val_accuracy: 0.1210
Epoch 16/300
313/313 [==============================] - 14s 45ms/step - loss: 3.9352 - accuracy: 0.1222 - val_loss: 4.1369 - val_accuracy: 0.0959
Epoch 17/300
313/313 [==============================] - 14s 46ms/step - loss: 3.9150 - accuracy: 0.1261 - val_loss: 4.0472 - val_accuracy: 0.1130
Epoch 18/300
313/313 [==============================] - 15s 47ms/step - loss: 3.8961 - accuracy: 0.1288 - val_loss: 3.8869 - val_accuracy: 0.1264
Epoch 19/300
313/313 [==============================] - 14s 46ms/step - loss: 3.8756 - accuracy: 0.1313 - val_loss: 3.8897 - val_accuracy: 0.1302
Epoch 20/300
313/313 [==============================] - 14s 46ms/step - loss: 3.8580 - accuracy: 0.1354 - val_loss: 4.0845 - val_accuracy: 0.1038
Epoch 21/300
313/313 [==============================] - 15s 47ms/step - loss: 3.8408 - accuracy: 0.1361 - val_loss: 3.8228 - val_accuracy: 0.1399
Epoch 22/300
313/313 [==============================] - 14s 46ms/step - loss: 3.8183 - accuracy: 0.1399 - val_loss: 3.8913 - val_accuracy: 0.1248
Epoch 23/300
313/313 [==============================] - 14s 45ms/step - loss: 3.8026 - accuracy: 0.1403 - val_loss: 3.7223 - val_accuracy: 0.1588
Epoch 24/300
313/313 [==============================] - 14s 46ms/step - loss: 3.7864 - accuracy: 0.1443 - val_loss: 3.7772 - val_accuracy: 0.1454
Epoch 25/300
313/313 [==============================] - 14s 45ms/step - loss: 3.7688 - accuracy: 0.1473 - val_loss: 3.7481 - val_accuracy: 0.1475
Epoch 26/300
313/313 [==============================] - 14s 46ms/step - loss: 3.7553 - accuracy: 0.1467 - val_loss: 3.6356 - val_accuracy: 0.1661
Epoch 27/300
313/313 [==============================] - 14s 46ms/step - loss: 3.7373 - accuracy: 0.1523 - val_loss: 3.7361 - val_accuracy: 0.1498
Epoch 28/300
313/313 [==============================] - 14s 45ms/step - loss: 3.7185 - accuracy: 0.1548 - val_loss: 3.7651 - val_accuracy: 0.1522
Epoch 29/300
313/313 [==============================] - 14s 46ms/step - loss: 3.7082 - accuracy: 0.1572 - val_loss: 3.5722 - val_accuracy: 0.1903
Epoch 30/300
313/313 [==============================] - 15s 46ms/step - loss: 3.6918 - accuracy: 0.1591 - val_loss: 3.5789 - val_accuracy: 0.1736
Epoch 31/300
313/313 [==============================] - 14s 45ms/step - loss: 3.6796 - accuracy: 0.1611 - val_loss: 3.5679 - val_accuracy: 0.1769
Epoch 32/300
313/313 [==============================] - 14s 44ms/step - loss: 3.6621 - accuracy: 0.1637 - val_loss: 3.6382 - val_accuracy: 0.1733
Epoch 33/300
313/313 [==============================] - 14s 44ms/step - loss: 3.6462 - accuracy: 0.1693 - val_loss: 3.5870 - val_accuracy: 0.1782
Epoch 34/300
313/313 [==============================] - 14s 43ms/step - loss: 3.6311 - accuracy: 0.1717 - val_loss: 3.4966 - val_accuracy: 0.1926
Epoch 35/300
313/313 [==============================] - 14s 45ms/step - loss: 3.6136 - accuracy: 0.1758 - val_loss: 3.6132 - val_accuracy: 0.1623
Epoch 36/300
313/313 [==============================] - 14s 43ms/step - loss: 3.6017 - accuracy: 0.1750 - val_loss: 3.4681 - val_accuracy: 0.2059
Epoch 37/300
313/313 [==============================] - 13s 42ms/step - loss: 3.5861 - accuracy: 0.1790 - val_loss: 3.5388 - val_accuracy: 0.1904
Epoch 38/300
313/313 [==============================] - 14s 43ms/step - loss: 3.5776 - accuracy: 0.1814 - val_loss: 3.3981 - val_accuracy: 0.2042
Epoch 39/300
313/313 [==============================] - 14s 44ms/step - loss: 3.5605 - accuracy: 0.1823 - val_loss: 3.4669 - val_accuracy: 0.2048
Epoch 40/300
313/313 [==============================] - 14s 45ms/step - loss: 3.5462 - accuracy: 0.1868 - val_loss: 3.5358 - val_accuracy: 0.1903
Epoch 41/300
313/313 [==============================] - 14s 46ms/step - loss: 3.5374 - accuracy: 0.1892 - val_loss: 3.4541 - val_accuracy: 0.1941
Epoch 42/300
313/313 [==============================] - 14s 46ms/step - loss: 3.5208 - accuracy: 0.1914 - val_loss: 3.4505 - val_accuracy: 0.1945
Epoch 43/300
313/313 [==============================] - 14s 46ms/step - loss: 3.5104 - accuracy: 0.1940 - val_loss: 3.3625 - val_accuracy: 0.2161
Epoch 44/300
313/313 [==============================] - 14s 46ms/step - loss: 3.4945 - accuracy: 0.1980 - val_loss: 3.2776 - val_accuracy: 0.2386
Epoch 45/300
313/313 [==============================] - 14s 46ms/step - loss: 3.4792 - accuracy: 0.2014 - val_loss: 3.3539 - val_accuracy: 0.2338
Epoch 46/300
313/313 [==============================] - 14s 45ms/step - loss: 3.4698 - accuracy: 0.2010 - val_loss: 3.3798 - val_accuracy: 0.2139
Epoch 47/300
313/313 [==============================] - 14s 44ms/step - loss: 3.4565 - accuracy: 0.2037 - val_loss: 3.2625 - val_accuracy: 0.2380
Epoch 48/300
313/313 [==============================] - 14s 44ms/step - loss: 3.4447 - accuracy: 0.2061 - val_loss: 3.4714 - val_accuracy: 0.2145
Epoch 49/300
313/313 [==============================] - 14s 44ms/step - loss: 3.4327 - accuracy: 0.2077 - val_loss: 3.2222 - val_accuracy: 0.2465
Epoch 50/300
313/313 [==============================] - 14s 44ms/step - loss: 3.4200 - accuracy: 0.2115 - val_loss: 3.1820 - val_accuracy: 0.2597
Epoch 51/300
313/313 [==============================] - 14s 44ms/step - loss: 3.4014 - accuracy: 0.2163 - val_loss: 3.2546 - val_accuracy: 0.2427
Epoch 52/300
313/313 [==============================] - 14s 44ms/step - loss: 3.3925 - accuracy: 0.2165 - val_loss: 3.3581 - val_accuracy: 0.2170
Epoch 53/300
313/313 [==============================] - 14s 45ms/step - loss: 3.3865 - accuracy: 0.2182 - val_loss: 3.1798 - val_accuracy: 0.2492
Epoch 54/300
313/313 [==============================] - 14s 45ms/step - loss: 3.3802 - accuracy: 0.2203 - val_loss: 3.2008 - val_accuracy: 0.2548
Epoch 55/300
313/313 [==============================] - 14s 45ms/step - loss: 3.3637 - accuracy: 0.2229 - val_loss: 3.2631 - val_accuracy: 0.2494
Epoch 56/300
313/313 [==============================] - 14s 45ms/step - loss: 3.3545 - accuracy: 0.2266 - val_loss: 3.2825 - val_accuracy: 0.2354
Epoch 57/300
313/313 [==============================] - 14s 45ms/step - loss: 3.3455 - accuracy: 0.2267 - val_loss: 3.1540 - val_accuracy: 0.2529
Epoch 58/300
313/313 [==============================] - 14s 44ms/step - loss: 3.3322 - accuracy: 0.2261 - val_loss: 3.1729 - val_accuracy: 0.2499
Epoch 59/300
313/313 [==============================] - 874s 3s/step - loss: 3.3274 - accuracy: 0.2303 - val_loss: 3.0690 - val_accuracy: 0.2664
Epoch 60/300
313/313 [==============================] - 65s 209ms/step - loss: 3.3127 - accuracy: 0.2327 - val_loss: 3.0908 - val_accuracy: 0.2665
Epoch 61/300
313/313 [==============================] - 76s 244ms/step - loss: 3.2994 - accuracy: 0.2348 - val_loss: 3.1223 - val_accuracy: 0.2737
Epoch 62/300
313/313 [==============================] - 156s 494ms/step - loss: 3.2905 - accuracy: 0.2382 - val_loss: 3.0357 - val_accuracy: 0.2669
Epoch 63/300
313/313 [==============================] - 98s 312ms/step - loss: 3.2808 - accuracy: 0.2389 - val_loss: 3.0672 - val_accuracy: 0.2819
Epoch 64/300
313/313 [==============================] - 192s 617ms/step - loss: 3.2724 - accuracy: 0.2386 - val_loss: 3.0298 - val_accuracy: 0.2812
Epoch 65/300
313/313 [==============================] - 218s 699ms/step - loss: 3.2601 - accuracy: 0.2428 - val_loss: 3.0197 - val_accuracy: 0.2831
Epoch 66/300
313/313 [==============================] - 198s 634ms/step - loss: 3.2582 - accuracy: 0.2432 - val_loss: 3.0143 - val_accuracy: 0.2933
Epoch 67/300
313/313 [==============================] - 254s 815ms/step - loss: 3.2420 - accuracy: 0.2474 - val_loss: 3.0306 - val_accuracy: 0.2812
Epoch 68/300
313/313 [==============================] - 188s 591ms/step - loss: 3.2340 - accuracy: 0.2474 - val_loss: 3.0183 - val_accuracy: 0.2825
Epoch 69/300
313/313 [==============================] - 232s 726ms/step - loss: 3.2257 - accuracy: 0.2494 - val_loss: 3.0411 - val_accuracy: 0.2831
Epoch 70/300
313/313 [==============================] - 83s 261ms/step - loss: 3.2146 - accuracy: 0.2529 - val_loss: 2.9525 - val_accuracy: 0.3094
Epoch 71/300
313/313 [==============================] - 70s 225ms/step - loss: 3.2046 - accuracy: 0.2536 - val_loss: 3.0113 - val_accuracy: 0.2949
Epoch 72/300
313/313 [==============================] - 154s 492ms/step - loss: 3.1946 - accuracy: 0.2599 - val_loss: 2.9811 - val_accuracy: 0.3029
Epoch 73/300
313/313 [==============================] - 15s 48ms/step - loss: 3.1911 - accuracy: 0.2560 - val_loss: 2.9358 - val_accuracy: 0.3057
Epoch 74/300
313/313 [==============================] - 27s 86ms/step - loss: 3.1818 - accuracy: 0.2575 - val_loss: 2.9535 - val_accuracy: 0.2964
Epoch 75/300
313/313 [==============================] - 34s 108ms/step - loss: 3.1717 - accuracy: 0.2617 - val_loss: 2.9014 - val_accuracy: 0.3076
Epoch 76/300
313/313 [==============================] - 28s 90ms/step - loss: 3.1635 - accuracy: 0.2616 - val_loss: 2.9589 - val_accuracy: 0.2958
Epoch 77/300
313/313 [==============================] - 46s 147ms/step - loss: 3.1503 - accuracy: 0.2628 - val_loss: 2.8999 - val_accuracy: 0.3217
Epoch 78/300
313/313 [==============================] - 55s 176ms/step - loss: 3.1446 - accuracy: 0.2649 - val_loss: 3.0374 - val_accuracy: 0.2881
Epoch 79/300
313/313 [==============================] - 49s 158ms/step - loss: 3.1399 - accuracy: 0.2669 - val_loss: 2.9515 - val_accuracy: 0.3046
Epoch 80/300
313/313 [==============================] - 31s 100ms/step - loss: 3.1272 - accuracy: 0.2688 - val_loss: 2.9571 - val_accuracy: 0.2964
Epoch 81/300
313/313 [==============================] - 24s 78ms/step - loss: 3.1177 - accuracy: 0.2708 - val_loss: 2.9396 - val_accuracy: 0.3088
Epoch 82/300
313/313 [==============================] - 24s 78ms/step - loss: 3.1090 - accuracy: 0.2734 - val_loss: 2.8708 - val_accuracy: 0.3205
Epoch 83/300
313/313 [==============================] - 25s 78ms/step - loss: 3.1010 - accuracy: 0.2742 - val_loss: 2.8072 - val_accuracy: 0.3378
Epoch 84/300
313/313 [==============================] - 24s 76ms/step - loss: 3.0947 - accuracy: 0.2760 - val_loss: 2.8277 - val_accuracy: 0.3304
Epoch 85/300
313/313 [==============================] - 24s 76ms/step - loss: 3.0877 - accuracy: 0.2767 - val_loss: 2.7615 - val_accuracy: 0.3415
Epoch 86/300
313/313 [==============================] - 24s 76ms/step - loss: 3.0769 - accuracy: 0.2803 - val_loss: 2.9803 - val_accuracy: 0.3061
Epoch 87/300
313/313 [==============================] - 23s 75ms/step - loss: 3.0672 - accuracy: 0.2791 - val_loss: 2.8368 - val_accuracy: 0.3169
Epoch 88/300
313/313 [==============================] - 24s 77ms/step - loss: 3.0638 - accuracy: 0.2809 - val_loss: 2.8951 - val_accuracy: 0.3104
Epoch 89/300
313/313 [==============================] - 24s 76ms/step - loss: 3.0520 - accuracy: 0.2840 - val_loss: 2.7393 - val_accuracy: 0.3491
Epoch 90/300
313/313 [==============================] - 24s 77ms/step - loss: 3.0468 - accuracy: 0.2849 - val_loss: 2.7547 - val_accuracy: 0.3580
Epoch 91/300
313/313 [==============================] - 25s 78ms/step - loss: 3.0430 - accuracy: 0.2873 - val_loss: 2.9431 - val_accuracy: 0.2959
Epoch 92/300
313/313 [==============================] - 24s 77ms/step - loss: 3.0299 - accuracy: 0.2900 - val_loss: 2.7963 - val_accuracy: 0.3430
Epoch 93/300
313/313 [==============================] - 24s 76ms/step - loss: 3.0209 - accuracy: 0.2907 - val_loss: 2.7856 - val_accuracy: 0.3395
Epoch 94/300
313/313 [==============================] - 24s 77ms/step - loss: 3.0047 - accuracy: 0.2940 - val_loss: 2.7104 - val_accuracy: 0.3515
Epoch 95/300
313/313 [==============================] - 25s 80ms/step - loss: 3.0088 - accuracy: 0.2927 - val_loss: 2.7650 - val_accuracy: 0.3369
Epoch 96/300
313/313 [==============================] - 24s 76ms/step - loss: 2.9960 - accuracy: 0.2943 - val_loss: 2.8057 - val_accuracy: 0.3337
Epoch 97/300
313/313 [==============================] - 24s 76ms/step - loss: 2.9911 - accuracy: 0.2949 - val_loss: 2.7409 - val_accuracy: 0.3397
Epoch 98/300
313/313 [==============================] - 24s 75ms/step - loss: 2.9805 - accuracy: 0.2991 - val_loss: 2.7215 - val_accuracy: 0.3479
Epoch 99/300
313/313 [==============================] - 23s 75ms/step - loss: 2.9737 - accuracy: 0.2990 - val_loss: 2.7342 - val_accuracy: 0.3384
Epoch 100/300
313/313 [==============================] - 24s 76ms/step - loss: 2.9620 - accuracy: 0.3031 - val_loss: 2.8989 - val_accuracy: 0.3145
Epoch 101/300
313/313 [==============================] - 25s 79ms/step - loss: 2.9550 - accuracy: 0.3040 - val_loss: 2.6785 - val_accuracy: 0.3593
Epoch 102/300
313/313 [==============================] - 24s 77ms/step - loss: 2.9465 - accuracy: 0.3043 - val_loss: 2.7249 - val_accuracy: 0.3506
Epoch 103/300
313/313 [==============================] - 24s 76ms/step - loss: 2.9427 - accuracy: 0.3082 - val_loss: 2.7305 - val_accuracy: 0.3479
Epoch 104/300
313/313 [==============================] - 23s 74ms/step - loss: 2.9333 - accuracy: 0.3094 - val_loss: 2.7601 - val_accuracy: 0.3552
Epoch 105/300
313/313 [==============================] - 24s 78ms/step - loss: 2.9342 - accuracy: 0.3100 - val_loss: 2.7258 - val_accuracy: 0.3431
Epoch 106/300
313/313 [==============================] - 24s 75ms/step - loss: 2.9170 - accuracy: 0.3130 - val_loss: 2.6625 - val_accuracy: 0.3650
Epoch 107/300
313/313 [==============================] - 23s 73ms/step - loss: 2.9112 - accuracy: 0.3139 - val_loss: 2.7170 - val_accuracy: 0.3630
Epoch 108/300
313/313 [==============================] - 24s 75ms/step - loss: 2.9103 - accuracy: 0.3146 - val_loss: 2.6500 - val_accuracy: 0.3607
Epoch 109/300
313/313 [==============================] - 24s 76ms/step - loss: 2.9009 - accuracy: 0.3144 - val_loss: 2.6507 - val_accuracy: 0.3704
Epoch 110/300
313/313 [==============================] - 24s 76ms/step - loss: 2.8857 - accuracy: 0.3174 - val_loss: 2.5897 - val_accuracy: 0.3863
Epoch 111/300
313/313 [==============================] - 24s 76ms/step - loss: 2.8835 - accuracy: 0.3196 - val_loss: 2.6076 - val_accuracy: 0.3760
Epoch 112/300
313/313 [==============================] - 25s 79ms/step - loss: 2.8763 - accuracy: 0.3224 - val_loss: 2.6809 - val_accuracy: 0.3697
Epoch 113/300
313/313 [==============================] - 25s 79ms/step - loss: 2.8721 - accuracy: 0.3218 - val_loss: 2.7268 - val_accuracy: 0.3581
Epoch 114/300
313/313 [==============================] - 23s 75ms/step - loss: 2.8589 - accuracy: 0.3228 - val_loss: 2.6298 - val_accuracy: 0.3761
Epoch 115/300
313/313 [==============================] - 23s 74ms/step - loss: 2.8560 - accuracy: 0.3263 - val_loss: 2.6763 - val_accuracy: 0.3659
Epoch 116/300
313/313 [==============================] - 24s 77ms/step - loss: 2.8495 - accuracy: 0.3277 - val_loss: 2.6891 - val_accuracy: 0.3537
Epoch 117/300
313/313 [==============================] - 25s 78ms/step - loss: 2.8354 - accuracy: 0.3293 - val_loss: 2.5972 - val_accuracy: 0.3813
Epoch 118/300
313/313 [==============================] - 23s 75ms/step - loss: 2.8336 - accuracy: 0.3314 - val_loss: 2.5905 - val_accuracy: 0.3830
Epoch 119/300
313/313 [==============================] - 24s 77ms/step - loss: 2.8174 - accuracy: 0.3332 - val_loss: 2.5930 - val_accuracy: 0.3762
Epoch 120/300
313/313 [==============================] - 24s 76ms/step - loss: 2.8199 - accuracy: 0.3322 - val_loss: 2.5958 - val_accuracy: 0.3841
Epoch 121/300
313/313 [==============================] - 24s 76ms/step - loss: 2.8125 - accuracy: 0.3348 - val_loss: 2.4908 - val_accuracy: 0.4073
Epoch 122/300
313/313 [==============================] - 24s 77ms/step - loss: 2.8033 - accuracy: 0.3369 - val_loss: 2.6089 - val_accuracy: 0.3799
Epoch 123/300
313/313 [==============================] - 24s 76ms/step - loss: 2.7959 - accuracy: 0.3381 - val_loss: 2.6186 - val_accuracy: 0.3858
Epoch 124/300
313/313 [==============================] - 24s 77ms/step - loss: 2.7899 - accuracy: 0.3398 - val_loss: 2.5758 - val_accuracy: 0.3864
Epoch 125/300
313/313 [==============================] - 24s 76ms/step - loss: 2.7820 - accuracy: 0.3410 - val_loss: 2.6413 - val_accuracy: 0.3900
Epoch 126/300
313/313 [==============================] - 23s 75ms/step - loss: 2.7692 - accuracy: 0.3469 - val_loss: 2.5783 - val_accuracy: 0.3880
Epoch 127/300
313/313 [==============================] - 23s 74ms/step - loss: 2.7653 - accuracy: 0.3448 - val_loss: 2.5054 - val_accuracy: 0.4067
Epoch 128/300
313/313 [==============================] - 24s 77ms/step - loss: 2.7655 - accuracy: 0.3449 - val_loss: 2.5956 - val_accuracy: 0.3824
Epoch 129/300
313/313 [==============================] - 24s 77ms/step - loss: 2.7488 - accuracy: 0.3498 - val_loss: 2.5227 - val_accuracy: 0.3981
Epoch 130/300
313/313 [==============================] - 24s 75ms/step - loss: 2.7494 - accuracy: 0.3482 - val_loss: 2.5223 - val_accuracy: 0.4042
Epoch 131/300
313/313 [==============================] - 25s 79ms/step - loss: 2.7365 - accuracy: 0.3512 - val_loss: 2.6034 - val_accuracy: 0.3840
Epoch 132/300
313/313 [==============================] - 24s 77ms/step - loss: 2.7366 - accuracy: 0.3517 - val_loss: 2.5241 - val_accuracy: 0.3957
Epoch 133/300
313/313 [==============================] - 25s 79ms/step - loss: 2.7266 - accuracy: 0.3551 - val_loss: 2.5602 - val_accuracy: 0.3866
Epoch 134/300
313/313 [==============================] - 24s 75ms/step - loss: 2.7169 - accuracy: 0.3557 - val_loss: 2.4829 - val_accuracy: 0.4093
Epoch 135/300
313/313 [==============================] - 24s 75ms/step - loss: 2.7062 - accuracy: 0.3585 - val_loss: 2.5194 - val_accuracy: 0.4053
Epoch 136/300
313/313 [==============================] - 24s 78ms/step - loss: 2.7090 - accuracy: 0.3585 - val_loss: 2.4844 - val_accuracy: 0.4107
Epoch 137/300
313/313 [==============================] - 24s 78ms/step - loss: 2.6939 - accuracy: 0.3610 - val_loss: 2.5258 - val_accuracy: 0.3893
Epoch 138/300
313/313 [==============================] - 24s 75ms/step - loss: 2.6906 - accuracy: 0.3617 - val_loss: 2.5607 - val_accuracy: 0.3878
Epoch 139/300
313/313 [==============================] - 24s 77ms/step - loss: 2.6802 - accuracy: 0.3652 - val_loss: 2.5027 - val_accuracy: 0.4118
Epoch 140/300
313/313 [==============================] - 23s 74ms/step - loss: 2.6789 - accuracy: 0.3655 - val_loss: 2.5623 - val_accuracy: 0.3863
Epoch 141/300
313/313 [==============================] - 23s 74ms/step - loss: 2.6661 - accuracy: 0.3681 - val_loss: 2.4719 - val_accuracy: 0.4198
Epoch 142/300
313/313 [==============================] - 47s 151ms/step - loss: 2.6615 - accuracy: 0.3688 - val_loss: 2.4607 - val_accuracy: 0.4115
Epoch 143/300
313/313 [==============================] - 47s 149ms/step - loss: 2.6525 - accuracy: 0.3698 - val_loss: 2.5474 - val_accuracy: 0.4120
Epoch 144/300
313/313 [==============================] - 171s 548ms/step - loss: 2.6491 - accuracy: 0.3705 - val_loss: 2.5039 - val_accuracy: 0.4038
Epoch 145/300
313/313 [==============================] - 61s 197ms/step - loss: 2.6408 - accuracy: 0.3743 - val_loss: 2.4832 - val_accuracy: 0.4148
Epoch 146/300
313/313 [==============================] - 231s 740ms/step - loss: 2.6332 - accuracy: 0.3756 - val_loss: 2.5069 - val_accuracy: 0.4135
Epoch 147/300
313/313 [==============================] - 29s 93ms/step - loss: 2.6284 - accuracy: 0.3749 - val_loss: 2.4933 - val_accuracy: 0.4159
Epoch 148/300
313/313 [==============================] - 28s 90ms/step - loss: 2.6191 - accuracy: 0.3767 - val_loss: 2.5280 - val_accuracy: 0.4054
Epoch 149/300
313/313 [==============================] - 28s 89ms/step - loss: 2.6119 - accuracy: 0.3814 - val_loss: 2.3885 - val_accuracy: 0.4420
Epoch 150/300
313/313 [==============================] - 30s 95ms/step - loss: 2.6074 - accuracy: 0.3835 - val_loss: 2.4971 - val_accuracy: 0.4125
Epoch 151/300
313/313 [==============================] - 26s 84ms/step - loss: 2.6003 - accuracy: 0.3821 - val_loss: 2.4230 - val_accuracy: 0.4266
Epoch 152/300
313/313 [==============================] - 28s 88ms/step - loss: 2.5929 - accuracy: 0.3832 - val_loss: 2.4337 - val_accuracy: 0.4286
Epoch 153/300
313/313 [==============================] - 30s 96ms/step - loss: 2.5812 - accuracy: 0.3870 - val_loss: 2.4415 - val_accuracy: 0.4258
Epoch 154/300
313/313 [==============================] - 35s 111ms/step - loss: 2.5791 - accuracy: 0.3882 - val_loss: 2.4708 - val_accuracy: 0.4261
Epoch 155/300
313/313 [==============================] - 28s 91ms/step - loss: 2.5794 - accuracy: 0.3884 - val_loss: 2.4261 - val_accuracy: 0.4288
Epoch 156/300
313/313 [==============================] - 31s 99ms/step - loss: 2.5637 - accuracy: 0.3925 - val_loss: 2.4416 - val_accuracy: 0.4221
Epoch 157/300
313/313 [==============================] - 30s 98ms/step - loss: 2.5647 - accuracy: 0.3912 - val_loss: 2.4154 - val_accuracy: 0.4355
Epoch 158/300
313/313 [==============================] - 26s 83ms/step - loss: 2.5559 - accuracy: 0.3912 - val_loss: 2.3988 - val_accuracy: 0.4348
Epoch 159/300
313/313 [==============================] - 33s 105ms/step - loss: 2.5453 - accuracy: 0.3935 - val_loss: 2.3837 - val_accuracy: 0.4366
Epoch 160/300
313/313 [==============================] - 29s 91ms/step - loss: 2.5396 - accuracy: 0.3955 - val_loss: 2.3844 - val_accuracy: 0.4468
Epoch 161/300
313/313 [==============================] - 27s 87ms/step - loss: 2.5310 - accuracy: 0.3993 - val_loss: 2.4092 - val_accuracy: 0.4341
Epoch 162/300
313/313 [==============================] - 29s 93ms/step - loss: 2.5204 - accuracy: 0.4019 - val_loss: 2.4215 - val_accuracy: 0.4405
Epoch 163/300
313/313 [==============================] - 31s 98ms/step - loss: 2.5195 - accuracy: 0.4012 - val_loss: 2.4173 - val_accuracy: 0.4300
Epoch 164/300
313/313 [==============================] - 35s 111ms/step - loss: 2.5160 - accuracy: 0.4017 - val_loss: 2.4282 - val_accuracy: 0.4287
Epoch 165/300
313/313 [==============================] - 29s 93ms/step - loss: 2.5067 - accuracy: 0.4029 - val_loss: 2.4498 - val_accuracy: 0.4261
Epoch 166/300
313/313 [==============================] - 31s 98ms/step - loss: 2.5047 - accuracy: 0.4045 - val_loss: 2.3752 - val_accuracy: 0.4400
Epoch 167/300
313/313 [==============================] - 30s 94ms/step - loss: 2.4996 - accuracy: 0.4047 - val_loss: 2.3900 - val_accuracy: 0.4413
Epoch 168/300
313/313 [==============================] - 30s 95ms/step - loss: 2.4864 - accuracy: 0.4108 - val_loss: 2.3739 - val_accuracy: 0.4351
Epoch 169/300
313/313 [==============================] - 30s 96ms/step - loss: 2.4744 - accuracy: 0.4099 - val_loss: 2.5293 - val_accuracy: 0.4083
Epoch 170/300
313/313 [==============================] - 30s 96ms/step - loss: 2.4681 - accuracy: 0.4126 - val_loss: 2.4265 - val_accuracy: 0.4372
Epoch 171/300
313/313 [==============================] - 26s 83ms/step - loss: 2.4682 - accuracy: 0.4132 - val_loss: 2.4376 - val_accuracy: 0.4282
Epoch 172/300
313/313 [==============================] - 31s 98ms/step - loss: 2.4582 - accuracy: 0.4166 - val_loss: 2.3674 - val_accuracy: 0.4480
Epoch 173/300
313/313 [==============================] - 29s 94ms/step - loss: 2.4453 - accuracy: 0.4176 - val_loss: 2.4326 - val_accuracy: 0.4315
Epoch 174/300
313/313 [==============================] - 28s 89ms/step - loss: 2.4402 - accuracy: 0.4188 - val_loss: 2.3780 - val_accuracy: 0.4526
Epoch 175/300
313/313 [==============================] - 121s 388ms/step - loss: 2.4382 - accuracy: 0.4199 - val_loss: 2.4076 - val_accuracy: 0.4398
Epoch 176/300
313/313 [==============================] - 36s 115ms/step - loss: 2.4289 - accuracy: 0.4221 - val_loss: 2.4553 - val_accuracy: 0.4331
Epoch 177/300
313/313 [==============================] - 33s 105ms/step - loss: 2.4334 - accuracy: 0.4206 - val_loss: 2.3568 - val_accuracy: 0.4508
Epoch 178/300
313/313 [==============================] - 34s 108ms/step - loss: 2.4212 - accuracy: 0.4261 - val_loss: 2.3640 - val_accuracy: 0.4457
Epoch 179/300
313/313 [==============================] - 21s 67ms/step - loss: 2.4101 - accuracy: 0.4268 - val_loss: 2.3538 - val_accuracy: 0.4519
Epoch 180/300
313/313 [==============================] - 21s 69ms/step - loss: 2.4084 - accuracy: 0.4264 - val_loss: 2.3947 - val_accuracy: 0.4428
Epoch 181/300
313/313 [==============================] - 26s 83ms/step - loss: 2.3989 - accuracy: 0.4308 - val_loss: 2.3858 - val_accuracy: 0.4516
Epoch 182/300
313/313 [==============================] - 27s 85ms/step - loss: 2.3976 - accuracy: 0.4301 - val_loss: 2.4358 - val_accuracy: 0.4243
Epoch 183/300
313/313 [==============================] - 89s 286ms/step - loss: 2.3887 - accuracy: 0.4309 - val_loss: 2.3479 - val_accuracy: 0.4509
Epoch 184/300
313/313 [==============================] - 41s 130ms/step - loss: 2.3793 - accuracy: 0.4330 - val_loss: 2.4563 - val_accuracy: 0.4426
Epoch 185/300
313/313 [==============================] - 24s 75ms/step - loss: 2.3782 - accuracy: 0.4354 - val_loss: 2.4095 - val_accuracy: 0.4387
Epoch 186/300
313/313 [==============================] - 23s 75ms/step - loss: 2.3682 - accuracy: 0.4393 - val_loss: 2.3580 - val_accuracy: 0.4435
Epoch 187/300
313/313 [==============================] - 24s 75ms/step - loss: 2.3583 - accuracy: 0.4401 - val_loss: 2.4197 - val_accuracy: 0.4445
Epoch 188/300
313/313 [==============================] - 24s 78ms/step - loss: 2.3521 - accuracy: 0.4399 - val_loss: 2.3844 - val_accuracy: 0.4557
Epoch 189/300
313/313 [==============================] - 24s 78ms/step - loss: 2.3494 - accuracy: 0.4413 - val_loss: 2.4528 - val_accuracy: 0.4464
Epoch 190/300
313/313 [==============================] - 23s 75ms/step - loss: 2.3408 - accuracy: 0.4428 - val_loss: 2.3941 - val_accuracy: 0.4506
Epoch 191/300
313/313 [==============================] - 25s 81ms/step - loss: 2.3275 - accuracy: 0.4439 - val_loss: 2.3448 - val_accuracy: 0.4618
Epoch 192/300
313/313 [==============================] - 24s 75ms/step - loss: 2.3357 - accuracy: 0.4438 - val_loss: 2.3109 - val_accuracy: 0.4661
Epoch 193/300
313/313 [==============================] - 24s 76ms/step - loss: 2.3222 - accuracy: 0.4471 - val_loss: 2.3894 - val_accuracy: 0.4479
Epoch 194/300
313/313 [==============================] - 24s 77ms/step - loss: 2.3171 - accuracy: 0.4477 - val_loss: 2.3524 - val_accuracy: 0.4635
Epoch 195/300
313/313 [==============================] - 25s 80ms/step - loss: 2.3099 - accuracy: 0.4508 - val_loss: 2.4519 - val_accuracy: 0.4490
Epoch 196/300
313/313 [==============================] - 24s 78ms/step - loss: 2.3039 - accuracy: 0.4518 - val_loss: 2.4114 - val_accuracy: 0.4487
Epoch 197/300
313/313 [==============================] - 24s 77ms/step - loss: 2.2998 - accuracy: 0.4535 - val_loss: 2.3604 - val_accuracy: 0.4529
Epoch 198/300
313/313 [==============================] - 23s 74ms/step - loss: 2.2904 - accuracy: 0.4526 - val_loss: 2.3672 - val_accuracy: 0.4612
Epoch 199/300
313/313 [==============================] - 25s 81ms/step - loss: 2.2936 - accuracy: 0.4535 - val_loss: 2.4299 - val_accuracy: 0.4402
Epoch 200/300
313/313 [==============================] - 24s 77ms/step - loss: 2.2793 - accuracy: 0.4580 - val_loss: 2.3294 - val_accuracy: 0.4611
Epoch 201/300
313/313 [==============================] - 23s 75ms/step - loss: 2.2753 - accuracy: 0.4572 - val_loss: 2.3608 - val_accuracy: 0.4681
Epoch 202/300
313/313 [==============================] - 24s 78ms/step - loss: 2.2656 - accuracy: 0.4617 - val_loss: 2.3302 - val_accuracy: 0.4666
Epoch 203/300
313/313 [==============================] - 24s 78ms/step - loss: 2.2602 - accuracy: 0.4646 - val_loss: 2.3044 - val_accuracy: 0.4738
Epoch 204/300
313/313 [==============================] - 24s 75ms/step - loss: 2.2567 - accuracy: 0.4612 - val_loss: 2.3530 - val_accuracy: 0.4530
Epoch 205/300
313/313 [==============================] - 24s 77ms/step - loss: 2.2508 - accuracy: 0.4653 - val_loss: 2.3841 - val_accuracy: 0.4621
Epoch 206/300
313/313 [==============================] - 25s 81ms/step - loss: 2.2334 - accuracy: 0.4725 - val_loss: 2.4000 - val_accuracy: 0.4548
Epoch 207/300
313/313 [==============================] - 24s 76ms/step - loss: 2.2343 - accuracy: 0.4692 - val_loss: 2.3591 - val_accuracy: 0.4780
Epoch 208/300
313/313 [==============================] - 24s 78ms/step - loss: 2.2381 - accuracy: 0.4678 - val_loss: 2.3470 - val_accuracy: 0.4613
Epoch 209/300
313/313 [==============================] - 25s 79ms/step - loss: 2.2283 - accuracy: 0.4712 - val_loss: 2.2999 - val_accuracy: 0.4767
Epoch 210/300
313/313 [==============================] - 24s 76ms/step - loss: 2.2214 - accuracy: 0.4719 - val_loss: 2.3503 - val_accuracy: 0.4680
Epoch 211/300
313/313 [==============================] - 24s 77ms/step - loss: 2.2141 - accuracy: 0.4745 - val_loss: 2.4172 - val_accuracy: 0.4485
Epoch 212/300
313/313 [==============================] - 25s 81ms/step - loss: 2.2020 - accuracy: 0.4746 - val_loss: 2.3096 - val_accuracy: 0.4728
Epoch 213/300
313/313 [==============================] - 24s 77ms/step - loss: 2.2006 - accuracy: 0.4766 - val_loss: 2.3646 - val_accuracy: 0.4754
Epoch 214/300
313/313 [==============================] - 24s 78ms/step - loss: 2.2018 - accuracy: 0.4753 - val_loss: 2.2870 - val_accuracy: 0.4789
Epoch 215/300
313/313 [==============================] - 24s 76ms/step - loss: 2.1862 - accuracy: 0.4807 - val_loss: 2.3882 - val_accuracy: 0.4668
Epoch 216/300
313/313 [==============================] - 25s 79ms/step - loss: 2.1856 - accuracy: 0.4800 - val_loss: 2.4756 - val_accuracy: 0.4436
Epoch 217/300
313/313 [==============================] - 84s 269ms/step - loss: 2.1782 - accuracy: 0.4818 - val_loss: 2.4400 - val_accuracy: 0.4531
Epoch 218/300
313/313 [==============================] - 190s 609ms/step - loss: 2.1702 - accuracy: 0.4845 - val_loss: 2.3238 - val_accuracy: 0.4801
Epoch 219/300
313/313 [==============================] - 148s 475ms/step - loss: 2.1636 - accuracy: 0.4851 - val_loss: 2.3310 - val_accuracy: 0.4723
Epoch 220/300
313/313 [==============================] - 30s 94ms/step - loss: 2.1606 - accuracy: 0.4870 - val_loss: 2.3719 - val_accuracy: 0.4753
Epoch 221/300
313/313 [==============================] - 27s 86ms/step - loss: 2.1578 - accuracy: 0.4881 - val_loss: 2.3999 - val_accuracy: 0.4679
Epoch 222/300
313/313 [==============================] - 29s 93ms/step - loss: 2.1542 - accuracy: 0.4907 - val_loss: 2.3379 - val_accuracy: 0.4764
Epoch 223/300
313/313 [==============================] - 29s 94ms/step - loss: 2.1408 - accuracy: 0.4905 - val_loss: 2.3899 - val_accuracy: 0.4622
Epoch 224/300
313/313 [==============================] - 559s 2s/step - loss: 2.1355 - accuracy: 0.4937 - val_loss: 2.3563 - val_accuracy: 0.4742
Epoch 225/300
313/313 [==============================] - 92s 293ms/step - loss: 2.1225 - accuracy: 0.4975 - val_loss: 2.3452 - val_accuracy: 0.4756
Epoch 226/300
313/313 [==============================] - 23s 74ms/step - loss: 2.1255 - accuracy: 0.4961 - val_loss: 2.3467 - val_accuracy: 0.4724
Epoch 227/300
313/313 [==============================] - 23s 73ms/step - loss: 2.1249 - accuracy: 0.4972 - val_loss: 2.3097 - val_accuracy: 0.4824
Epoch 228/300
313/313 [==============================] - 23s 74ms/step - loss: 2.1129 - accuracy: 0.4974 - val_loss: 2.3621 - val_accuracy: 0.4743
Epoch 229/300
313/313 [==============================] - 23s 72ms/step - loss: 2.1082 - accuracy: 0.4980 - val_loss: 2.4279 - val_accuracy: 0.4590
Epoch 230/300
313/313 [==============================] - 23s 75ms/step - loss: 2.1015 - accuracy: 0.5011 - val_loss: 2.3716 - val_accuracy: 0.4766
Epoch 231/300
313/313 [==============================] - 24s 76ms/step - loss: 2.0939 - accuracy: 0.5025 - val_loss: 2.3142 - val_accuracy: 0.4817
Epoch 232/300
313/313 [==============================] - 23s 73ms/step - loss: 2.0897 - accuracy: 0.5032 - val_loss: 2.3421 - val_accuracy: 0.4808
Epoch 233/300
313/313 [==============================] - 24s 77ms/step - loss: 2.0842 - accuracy: 0.5053 - val_loss: 2.4095 - val_accuracy: 0.4643
Epoch 234/300
313/313 [==============================] - 23s 73ms/step - loss: 2.0799 - accuracy: 0.5050 - val_loss: 2.3658 - val_accuracy: 0.4786
Epoch 234: early stopping
313/313 [==============================] - 2s 5ms/step
c:\Users\Admin\.conda\envs\gpu_env\lib\site-packages\sklearn\metrics\_classification.py:1334: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
visualkeras.layered_view(model,legend=True).show() # display using your system viewer
# del model
Model_scores
model = Sequential()
# Convolutional Layer
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.3))
model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.3))
# Convolutional Layer
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.3))
# Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=256, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.3))
model.add(Flatten())
# model.add(Dropout(0.2))
model.add(Dense(128, activation='elu'))
model.add(Dropout(0.3))
model.add(Dense(100, activation='softmax',kernel_regularizer=tensorflow.keras.regularizers.L2(0.02)))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adamax',
metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
mc = ModelCheckpoint('finemodel1.h5', monitor='val_accuracy', mode='max', verbose=1, save_best_only=True)
h_callback = model.fit(X_train, y_train, epochs = 100,validation_data=(X_val, y_val),batch_size=256,callbacks=[early_stopping,mc])
# Plot train vs test loss during training
plot_loss(h_callback.history['loss'], h_callback.history['val_loss'])
plot_accuracy(h_callback.history['accuracy'], h_callback.history['val_accuracy'])
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_10 (Conv2D) (None, 32, 32, 32) 896
batch_normalization_10 (Bat (None, 32, 32, 32) 128
chNormalization)
conv2d_11 (Conv2D) (None, 32, 32, 32) 9248
batch_normalization_11 (Bat (None, 32, 32, 32) 128
chNormalization)
max_pooling2d_4 (MaxPooling (None, 16, 16, 32) 0
2D)
dropout_5 (Dropout) (None, 16, 16, 32) 0
conv2d_12 (Conv2D) (None, 16, 16, 64) 18496
batch_normalization_12 (Bat (None, 16, 16, 64) 256
chNormalization)
conv2d_13 (Conv2D) (None, 16, 16, 64) 36928
batch_normalization_13 (Bat (None, 16, 16, 64) 256
chNormalization)
max_pooling2d_5 (MaxPooling (None, 8, 8, 64) 0
2D)
dropout_6 (Dropout) (None, 8, 8, 64) 0
conv2d_14 (Conv2D) (None, 8, 8, 128) 73856
batch_normalization_14 (Bat (None, 8, 8, 128) 512
chNormalization)
conv2d_15 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_15 (Bat (None, 8, 8, 128) 512
chNormalization)
conv2d_16 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_16 (Bat (None, 8, 8, 128) 512
chNormalization)
conv2d_17 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_17 (Bat (None, 8, 8, 128) 512
chNormalization)
max_pooling2d_6 (MaxPooling (None, 4, 4, 128) 0
2D)
dropout_7 (Dropout) (None, 4, 4, 128) 0
conv2d_18 (Conv2D) (None, 4, 4, 256) 295168
batch_normalization_18 (Bat (None, 4, 4, 256) 1024
chNormalization)
conv2d_19 (Conv2D) (None, 4, 4, 256) 590080
batch_normalization_19 (Bat (None, 4, 4, 256) 1024
chNormalization)
max_pooling2d_7 (MaxPooling (None, 2, 2, 256) 0
2D)
dropout_8 (Dropout) (None, 2, 2, 256) 0
flatten_1 (Flatten) (None, 1024) 0
dense_2 (Dense) (None, 128) 131200
dropout_9 (Dropout) (None, 128) 0
dense_3 (Dense) (None, 100) 12900
=================================================================
Total params: 1,616,388
Trainable params: 1,613,956
Non-trainable params: 2,432
_________________________________________________________________
Epoch 1/100
313/313 [==============================] - ETA: 0s - loss: 5.6323 - accuracy: 0.0651
Epoch 1: val_accuracy improved from -inf to 0.04560, saving model to finemodel1.h5
313/313 [==============================] - 100s 314ms/step - loss: 5.6323 - accuracy: 0.0651 - val_loss: 5.4187 - val_accuracy: 0.0456
Epoch 2/100
312/313 [============================>.] - ETA: 0s - loss: 4.2968 - accuracy: 0.1295
Epoch 2: val_accuracy improved from 0.04560 to 0.17970, saving model to finemodel1.h5
313/313 [==============================] - 105s 332ms/step - loss: 4.2961 - accuracy: 0.1296 - val_loss: 3.8600 - val_accuracy: 0.1797
Epoch 3/100
313/313 [==============================] - ETA: 0s - loss: 3.7521 - accuracy: 0.1728
Epoch 3: val_accuracy improved from 0.17970 to 0.22930, saving model to finemodel1.h5
313/313 [==============================] - 27s 86ms/step - loss: 3.7521 - accuracy: 0.1728 - val_loss: 3.4410 - val_accuracy: 0.2293
Epoch 4/100
312/313 [============================>.] - ETA: 0s - loss: 3.4457 - accuracy: 0.2067
Epoch 4: val_accuracy improved from 0.22930 to 0.26640, saving model to finemodel1.h5
313/313 [==============================] - 26s 82ms/step - loss: 3.4455 - accuracy: 0.2068 - val_loss: 3.0928 - val_accuracy: 0.2664
Epoch 5/100
312/313 [============================>.] - ETA: 0s - loss: 3.2306 - accuracy: 0.2383
Epoch 5: val_accuracy improved from 0.26640 to 0.30700, saving model to finemodel1.h5
313/313 [==============================] - 29s 92ms/step - loss: 3.2305 - accuracy: 0.2384 - val_loss: 2.9201 - val_accuracy: 0.3070
Epoch 6/100
312/313 [============================>.] - ETA: 0s - loss: 3.0593 - accuracy: 0.2655
Epoch 6: val_accuracy improved from 0.30700 to 0.33310, saving model to finemodel1.h5
313/313 [==============================] - 28s 90ms/step - loss: 3.0593 - accuracy: 0.2655 - val_loss: 2.7293 - val_accuracy: 0.3331
Epoch 7/100
313/313 [==============================] - ETA: 0s - loss: 2.9121 - accuracy: 0.2906
Epoch 7: val_accuracy improved from 0.33310 to 0.37040, saving model to finemodel1.h5
313/313 [==============================] - 27s 87ms/step - loss: 2.9121 - accuracy: 0.2906 - val_loss: 2.5443 - val_accuracy: 0.3704
Epoch 8/100
312/313 [============================>.] - ETA: 0s - loss: 2.7908 - accuracy: 0.3130
Epoch 8: val_accuracy improved from 0.37040 to 0.38540, saving model to finemodel1.h5
313/313 [==============================] - 27s 85ms/step - loss: 2.7905 - accuracy: 0.3131 - val_loss: 2.4323 - val_accuracy: 0.3854
Epoch 9/100
312/313 [============================>.] - ETA: 0s - loss: 2.6827 - accuracy: 0.3332
Epoch 9: val_accuracy improved from 0.38540 to 0.40750, saving model to finemodel1.h5
313/313 [==============================] - 28s 88ms/step - loss: 2.6830 - accuracy: 0.3331 - val_loss: 2.3493 - val_accuracy: 0.4075
Epoch 10/100
312/313 [============================>.] - ETA: 0s - loss: 2.5878 - accuracy: 0.3503
Epoch 10: val_accuracy improved from 0.40750 to 0.41050, saving model to finemodel1.h5
313/313 [==============================] - 27s 86ms/step - loss: 2.5878 - accuracy: 0.3503 - val_loss: 2.3181 - val_accuracy: 0.4105
Epoch 11/100
312/313 [============================>.] - ETA: 0s - loss: 2.4986 - accuracy: 0.3694
Epoch 11: val_accuracy improved from 0.41050 to 0.43950, saving model to finemodel1.h5
313/313 [==============================] - 28s 90ms/step - loss: 2.4985 - accuracy: 0.3694 - val_loss: 2.2070 - val_accuracy: 0.4395
Epoch 12/100
312/313 [============================>.] - ETA: 0s - loss: 2.4186 - accuracy: 0.3867
Epoch 12: val_accuracy did not improve from 0.43950
313/313 [==============================] - 26s 82ms/step - loss: 2.4189 - accuracy: 0.3867 - val_loss: 2.2807 - val_accuracy: 0.4201
Epoch 13/100
312/313 [============================>.] - ETA: 0s - loss: 2.3515 - accuracy: 0.4004
Epoch 13: val_accuracy improved from 0.43950 to 0.45960, saving model to finemodel1.h5
313/313 [==============================] - 27s 86ms/step - loss: 2.3519 - accuracy: 0.4003 - val_loss: 2.1066 - val_accuracy: 0.4596
Epoch 14/100
312/313 [============================>.] - ETA: 0s - loss: 2.2916 - accuracy: 0.4134
Epoch 14: val_accuracy improved from 0.45960 to 0.47470, saving model to finemodel1.h5
313/313 [==============================] - 29s 91ms/step - loss: 2.2919 - accuracy: 0.4133 - val_loss: 2.0561 - val_accuracy: 0.4747
Epoch 15/100
312/313 [============================>.] - ETA: 0s - loss: 2.2275 - accuracy: 0.4285
Epoch 15: val_accuracy improved from 0.47470 to 0.48410, saving model to finemodel1.h5
313/313 [==============================] - 27s 86ms/step - loss: 2.2277 - accuracy: 0.4284 - val_loss: 2.0058 - val_accuracy: 0.4841
Epoch 16/100
312/313 [============================>.] - ETA: 0s - loss: 2.1702 - accuracy: 0.4396
Epoch 16: val_accuracy did not improve from 0.48410
313/313 [==============================] - 27s 85ms/step - loss: 2.1698 - accuracy: 0.4397 - val_loss: 2.0292 - val_accuracy: 0.4814
Epoch 17/100
312/313 [============================>.] - ETA: 0s - loss: 2.1171 - accuracy: 0.4516
Epoch 17: val_accuracy improved from 0.48410 to 0.48880, saving model to finemodel1.h5
313/313 [==============================] - 27s 86ms/step - loss: 2.1174 - accuracy: 0.4515 - val_loss: 1.9814 - val_accuracy: 0.4888
Epoch 18/100
312/313 [============================>.] - ETA: 0s - loss: 2.0653 - accuracy: 0.4624
Epoch 18: val_accuracy improved from 0.48880 to 0.49790, saving model to finemodel1.h5
313/313 [==============================] - 27s 88ms/step - loss: 2.0653 - accuracy: 0.4625 - val_loss: 1.9702 - val_accuracy: 0.4979
Epoch 19/100
312/313 [============================>.] - ETA: 0s - loss: 2.0134 - accuracy: 0.4716
Epoch 19: val_accuracy improved from 0.49790 to 0.50980, saving model to finemodel1.h5
313/313 [==============================] - 27s 85ms/step - loss: 2.0134 - accuracy: 0.4716 - val_loss: 1.9017 - val_accuracy: 0.5098
Epoch 20/100
312/313 [============================>.] - ETA: 0s - loss: 1.9694 - accuracy: 0.4832
Epoch 20: val_accuracy improved from 0.50980 to 0.51080, saving model to finemodel1.h5
313/313 [==============================] - 27s 87ms/step - loss: 1.9696 - accuracy: 0.4831 - val_loss: 1.8906 - val_accuracy: 0.5108
Epoch 21/100
313/313 [==============================] - ETA: 0s - loss: 1.9202 - accuracy: 0.4939
Epoch 21: val_accuracy improved from 0.51080 to 0.51470, saving model to finemodel1.h5
313/313 [==============================] - 27s 86ms/step - loss: 1.9202 - accuracy: 0.4939 - val_loss: 1.9005 - val_accuracy: 0.5147
Epoch 22/100
313/313 [==============================] - ETA: 0s - loss: 1.8821 - accuracy: 0.5035
Epoch 22: val_accuracy improved from 0.51470 to 0.52360, saving model to finemodel1.h5
313/313 [==============================] - 26s 84ms/step - loss: 1.8821 - accuracy: 0.5035 - val_loss: 1.8805 - val_accuracy: 0.5236
Epoch 23/100
312/313 [============================>.] - ETA: 0s - loss: 1.8458 - accuracy: 0.5107
Epoch 23: val_accuracy did not improve from 0.52360
313/313 [==============================] - 29s 93ms/step - loss: 1.8460 - accuracy: 0.5106 - val_loss: 1.8842 - val_accuracy: 0.5162
Epoch 24/100
312/313 [============================>.] - ETA: 0s - loss: 1.8009 - accuracy: 0.5198
Epoch 24: val_accuracy improved from 0.52360 to 0.52370, saving model to finemodel1.h5
313/313 [==============================] - 28s 89ms/step - loss: 1.8012 - accuracy: 0.5198 - val_loss: 1.8739 - val_accuracy: 0.5237
Epoch 25/100
313/313 [==============================] - ETA: 0s - loss: 1.7638 - accuracy: 0.5290
Epoch 25: val_accuracy did not improve from 0.52370
313/313 [==============================] - 27s 87ms/step - loss: 1.7638 - accuracy: 0.5290 - val_loss: 1.8965 - val_accuracy: 0.5190
Epoch 26/100
313/313 [==============================] - ETA: 0s - loss: 1.7332 - accuracy: 0.5365
Epoch 26: val_accuracy improved from 0.52370 to 0.53300, saving model to finemodel1.h5
313/313 [==============================] - 27s 86ms/step - loss: 1.7332 - accuracy: 0.5365 - val_loss: 1.8502 - val_accuracy: 0.5330
Epoch 27/100
312/313 [============================>.] - ETA: 0s - loss: 1.6935 - accuracy: 0.5476
Epoch 27: val_accuracy did not improve from 0.53300
313/313 [==============================] - 28s 91ms/step - loss: 1.6934 - accuracy: 0.5476 - val_loss: 1.8775 - val_accuracy: 0.5253
Epoch 28/100
312/313 [============================>.] - ETA: 0s - loss: 1.6619 - accuracy: 0.5532
Epoch 28: val_accuracy did not improve from 0.53300
313/313 [==============================] - 27s 86ms/step - loss: 1.6618 - accuracy: 0.5532 - val_loss: 1.8631 - val_accuracy: 0.5265
Epoch 29/100
313/313 [==============================] - ETA: 0s - loss: 1.6302 - accuracy: 0.5603
Epoch 29: val_accuracy did not improve from 0.53300
313/313 [==============================] - 26s 82ms/step - loss: 1.6302 - accuracy: 0.5603 - val_loss: 1.8692 - val_accuracy: 0.5316
Epoch 30/100
312/313 [============================>.] - ETA: 0s - loss: 1.6007 - accuracy: 0.5657
Epoch 30: val_accuracy improved from 0.53300 to 0.53420, saving model to finemodel1.h5
313/313 [==============================] - 27s 86ms/step - loss: 1.6005 - accuracy: 0.5658 - val_loss: 1.8607 - val_accuracy: 0.5342
Epoch 31/100
312/313 [============================>.] - ETA: 0s - loss: 1.5654 - accuracy: 0.5762
Epoch 31: val_accuracy did not improve from 0.53420
313/313 [==============================] - 26s 83ms/step - loss: 1.5655 - accuracy: 0.5762 - val_loss: 1.8660 - val_accuracy: 0.5341
Epoch 32/100
312/313 [============================>.] - ETA: 0s - loss: 1.5429 - accuracy: 0.5832
Epoch 32: val_accuracy improved from 0.53420 to 0.54120, saving model to finemodel1.h5
313/313 [==============================] - 27s 87ms/step - loss: 1.5432 - accuracy: 0.5831 - val_loss: 1.8327 - val_accuracy: 0.5412
Epoch 33/100
313/313 [==============================] - ETA: 0s - loss: 1.5113 - accuracy: 0.5907
Epoch 33: val_accuracy did not improve from 0.54120
313/313 [==============================] - 26s 83ms/step - loss: 1.5113 - accuracy: 0.5907 - val_loss: 1.8622 - val_accuracy: 0.5386
Epoch 34/100
312/313 [============================>.] - ETA: 0s - loss: 1.4830 - accuracy: 0.5951
Epoch 34: val_accuracy did not improve from 0.54120
313/313 [==============================] - 27s 85ms/step - loss: 1.4830 - accuracy: 0.5950 - val_loss: 1.8800 - val_accuracy: 0.5357
Epoch 35/100
313/313 [==============================] - ETA: 0s - loss: 1.4652 - accuracy: 0.6019
Epoch 35: val_accuracy did not improve from 0.54120
313/313 [==============================] - 26s 84ms/step - loss: 1.4652 - accuracy: 0.6019 - val_loss: 1.8451 - val_accuracy: 0.5375
Epoch 36/100
312/313 [============================>.] - ETA: 0s - loss: 1.4397 - accuracy: 0.6079
Epoch 36: val_accuracy did not improve from 0.54120
313/313 [==============================] - 26s 84ms/step - loss: 1.4401 - accuracy: 0.6079 - val_loss: 1.8551 - val_accuracy: 0.5387
Epoch 37/100
313/313 [==============================] - ETA: 0s - loss: 1.4217 - accuracy: 0.6105
Epoch 37: val_accuracy improved from 0.54120 to 0.54760, saving model to finemodel1.h5
313/313 [==============================] - 28s 89ms/step - loss: 1.4217 - accuracy: 0.6105 - val_loss: 1.8656 - val_accuracy: 0.5476
Epoch 38/100
312/313 [============================>.] - ETA: 0s - loss: 1.3990 - accuracy: 0.6160
Epoch 38: val_accuracy did not improve from 0.54760
313/313 [==============================] - 26s 84ms/step - loss: 1.3990 - accuracy: 0.6160 - val_loss: 1.8817 - val_accuracy: 0.5409
Epoch 39/100
312/313 [============================>.] - ETA: 0s - loss: 1.3737 - accuracy: 0.6247
Epoch 39: val_accuracy did not improve from 0.54760
313/313 [==============================] - 27s 85ms/step - loss: 1.3738 - accuracy: 0.6246 - val_loss: 1.8700 - val_accuracy: 0.5381
Epoch 40/100
313/313 [==============================] - ETA: 0s - loss: 1.3520 - accuracy: 0.6281
Epoch 40: val_accuracy did not improve from 0.54760
313/313 [==============================] - 25s 81ms/step - loss: 1.3520 - accuracy: 0.6281 - val_loss: 1.9222 - val_accuracy: 0.5341
Epoch 41/100
313/313 [==============================] - ETA: 0s - loss: 1.3207 - accuracy: 0.6353
Epoch 41: val_accuracy did not improve from 0.54760
313/313 [==============================] - 27s 86ms/step - loss: 1.3207 - accuracy: 0.6353 - val_loss: 1.8967 - val_accuracy: 0.5457
Epoch 42/100
312/313 [============================>.] - ETA: 0s - loss: 1.3117 - accuracy: 0.6386
Epoch 42: val_accuracy did not improve from 0.54760
313/313 [==============================] - 28s 89ms/step - loss: 1.3118 - accuracy: 0.6387 - val_loss: 1.9009 - val_accuracy: 0.5427
Epoch 43/100
312/313 [============================>.] - ETA: 0s - loss: 1.2996 - accuracy: 0.6406
Epoch 43: val_accuracy did not improve from 0.54760
313/313 [==============================] - 26s 82ms/step - loss: 1.2995 - accuracy: 0.6406 - val_loss: 1.9100 - val_accuracy: 0.5422
Epoch 44/100
313/313 [==============================] - ETA: 0s - loss: 1.2756 - accuracy: 0.6470
Epoch 44: val_accuracy did not improve from 0.54760
313/313 [==============================] - 26s 83ms/step - loss: 1.2756 - accuracy: 0.6470 - val_loss: 1.9140 - val_accuracy: 0.5428
Epoch 45/100
312/313 [============================>.] - ETA: 0s - loss: 1.2561 - accuracy: 0.6524
Epoch 45: val_accuracy did not improve from 0.54760
313/313 [==============================] - 30s 96ms/step - loss: 1.2561 - accuracy: 0.6524 - val_loss: 1.9210 - val_accuracy: 0.5374
Epoch 46/100
312/313 [============================>.] - ETA: 0s - loss: 1.2414 - accuracy: 0.6556
Epoch 46: val_accuracy did not improve from 0.54760
313/313 [==============================] - 27s 87ms/step - loss: 1.2414 - accuracy: 0.6556 - val_loss: 1.9053 - val_accuracy: 0.5436
Epoch 47/100
312/313 [============================>.] - ETA: 0s - loss: 1.2225 - accuracy: 0.6603
Epoch 47: val_accuracy did not improve from 0.54760
313/313 [==============================] - 28s 89ms/step - loss: 1.2221 - accuracy: 0.6604 - val_loss: 1.9345 - val_accuracy: 0.5404
Epoch 48/100
312/313 [============================>.] - ETA: 0s - loss: 1.2132 - accuracy: 0.6634
Epoch 48: val_accuracy did not improve from 0.54760
313/313 [==============================] - 28s 91ms/step - loss: 1.2129 - accuracy: 0.6635 - val_loss: 1.9074 - val_accuracy: 0.5413
Epoch 49/100
313/313 [==============================] - ETA: 0s - loss: 1.1918 - accuracy: 0.6685
Epoch 49: val_accuracy did not improve from 0.54760
313/313 [==============================] - 28s 90ms/step - loss: 1.1918 - accuracy: 0.6685 - val_loss: 1.9228 - val_accuracy: 0.5448
Epoch 50/100
313/313 [==============================] - ETA: 0s - loss: 1.1813 - accuracy: 0.6699
Epoch 50: val_accuracy did not improve from 0.54760
313/313 [==============================] - 26s 83ms/step - loss: 1.1813 - accuracy: 0.6699 - val_loss: 1.9419 - val_accuracy: 0.5476
Epoch 51/100
313/313 [==============================] - ETA: 0s - loss: 1.1581 - accuracy: 0.6767
Epoch 51: val_accuracy did not improve from 0.54760
313/313 [==============================] - 27s 85ms/step - loss: 1.1581 - accuracy: 0.6767 - val_loss: 1.9788 - val_accuracy: 0.5393
Epoch 52/100
312/313 [============================>.] - ETA: 0s - loss: 1.1438 - accuracy: 0.6821
Epoch 52: val_accuracy did not improve from 0.54760
313/313 [==============================] - 28s 88ms/step - loss: 1.1438 - accuracy: 0.6821 - val_loss: 1.9787 - val_accuracy: 0.5345
Epoch 52: early stopping
plot_accuracy_and_loss(h_callback)
model = Sequential()
# Convolutional Layer
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.4))
model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.4))
# Convolutional Layer
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.4))
# Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=256, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.4))
model.add(Flatten())
# model.add(Dropout(0.2))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(100, activation='softmax',kernel_regularizer=tensorflow.keras.regularizers.L2(0.01)))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
mc = ModelCheckpoint('fine_final.h5', monitor='val_accuracy', mode='max', verbose=1, save_best_only=True)
h_callback = model.fit(X_train, y_train, epochs = 100,validation_data=(X_val, y_val),batch_size=256,callbacks=[mc,early_stopping,reduce_lr])
# Plot train vs test loss during training
plot_loss(h_callback.history['loss'], h_callback.history['val_loss'])
plot_accuracy(h_callback.history['accuracy'], h_callback.history['val_accuracy'])
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_8 (Conv2D) (None, 32, 32, 32) 896
batch_normalization_8 (Batc (None, 32, 32, 32) 128
hNormalization)
conv2d_9 (Conv2D) (None, 32, 32, 32) 9248
batch_normalization_9 (Batc (None, 32, 32, 32) 128
hNormalization)
max_pooling2d_4 (MaxPooling (None, 16, 16, 32) 0
2D)
dropout_5 (Dropout) (None, 16, 16, 32) 0
conv2d_10 (Conv2D) (None, 16, 16, 64) 18496
batch_normalization_10 (Bat (None, 16, 16, 64) 256
chNormalization)
conv2d_11 (Conv2D) (None, 16, 16, 64) 36928
batch_normalization_11 (Bat (None, 16, 16, 64) 256
chNormalization)
max_pooling2d_5 (MaxPooling (None, 8, 8, 64) 0
2D)
dropout_6 (Dropout) (None, 8, 8, 64) 0
conv2d_12 (Conv2D) (None, 8, 8, 128) 73856
batch_normalization_12 (Bat (None, 8, 8, 128) 512
chNormalization)
conv2d_13 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_13 (Bat (None, 8, 8, 128) 512
chNormalization)
max_pooling2d_6 (MaxPooling (None, 4, 4, 128) 0
2D)
dropout_7 (Dropout) (None, 4, 4, 128) 0
conv2d_14 (Conv2D) (None, 4, 4, 256) 295168
batch_normalization_14 (Bat (None, 4, 4, 256) 1024
chNormalization)
conv2d_15 (Conv2D) (None, 4, 4, 256) 590080
batch_normalization_15 (Bat (None, 4, 4, 256) 1024
chNormalization)
max_pooling2d_7 (MaxPooling (None, 2, 2, 256) 0
2D)
dropout_8 (Dropout) (None, 2, 2, 256) 0
flatten_1 (Flatten) (None, 1024) 0
dense_2 (Dense) (None, 128) 131200
dropout_9 (Dropout) (None, 128) 0
dense_3 (Dense) (None, 100) 12900
=================================================================
Total params: 1,320,196
Trainable params: 1,318,276
Non-trainable params: 1,920
_________________________________________________________________
Epoch 1/100
1563/1563 [==============================] - ETA: 0s - loss: 3.7276 - accuracy: 0.1424
Epoch 1: val_accuracy improved from -inf to 0.28060, saving model to fine_final.h5
1563/1563 [==============================] - 53s 31ms/step - loss: 3.7276 - accuracy: 0.1424 - val_loss: 2.9012 - val_accuracy: 0.2806 - lr: 0.0010
Epoch 2/100
1561/1563 [============================>.] - ETA: 0s - loss: 2.9261 - accuracy: 0.2753
Epoch 2: val_accuracy improved from 0.28060 to 0.37710, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 2.9259 - accuracy: 0.2753 - val_loss: 2.4502 - val_accuracy: 0.3771 - lr: 0.0010
Epoch 3/100
1563/1563 [==============================] - ETA: 0s - loss: 2.6387 - accuracy: 0.3353
Epoch 3: val_accuracy improved from 0.37710 to 0.42780, saving model to fine_final.h5
1563/1563 [==============================] - 46s 29ms/step - loss: 2.6387 - accuracy: 0.3353 - val_loss: 2.2269 - val_accuracy: 0.4278 - lr: 0.0010
Epoch 4/100
1561/1563 [============================>.] - ETA: 0s - loss: 2.4725 - accuracy: 0.3714
Epoch 4: val_accuracy improved from 0.42780 to 0.47670, saving model to fine_final.h5
1563/1563 [==============================] - 46s 29ms/step - loss: 2.4727 - accuracy: 0.3714 - val_loss: 2.0160 - val_accuracy: 0.4767 - lr: 0.0010
Epoch 5/100
1561/1563 [============================>.] - ETA: 0s - loss: 2.3558 - accuracy: 0.3969
Epoch 5: val_accuracy improved from 0.47670 to 0.48220, saving model to fine_final.h5
1563/1563 [==============================] - 46s 29ms/step - loss: 2.3559 - accuracy: 0.3969 - val_loss: 1.9953 - val_accuracy: 0.4822 - lr: 0.0010
Epoch 6/100
1561/1563 [============================>.] - ETA: 0s - loss: 2.2703 - accuracy: 0.4169
Epoch 6: val_accuracy improved from 0.48220 to 0.50610, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 2.2703 - accuracy: 0.4170 - val_loss: 1.9199 - val_accuracy: 0.5061 - lr: 0.0010
Epoch 7/100
1561/1563 [============================>.] - ETA: 0s - loss: 2.1993 - accuracy: 0.4327
Epoch 7: val_accuracy improved from 0.50610 to 0.52390, saving model to fine_final.h5
1563/1563 [==============================] - 46s 29ms/step - loss: 2.1993 - accuracy: 0.4327 - val_loss: 1.8089 - val_accuracy: 0.5239 - lr: 0.0010
Epoch 8/100
1562/1563 [============================>.] - ETA: 0s - loss: 2.1411 - accuracy: 0.4462
Epoch 8: val_accuracy improved from 0.52390 to 0.52670, saving model to fine_final.h5
1563/1563 [==============================] - 46s 29ms/step - loss: 2.1411 - accuracy: 0.4462 - val_loss: 1.8358 - val_accuracy: 0.5267 - lr: 0.0010
Epoch 9/100
1561/1563 [============================>.] - ETA: 0s - loss: 2.0948 - accuracy: 0.4576
Epoch 9: val_accuracy improved from 0.52670 to 0.54110, saving model to fine_final.h5
1563/1563 [==============================] - 46s 29ms/step - loss: 2.0949 - accuracy: 0.4576 - val_loss: 1.7449 - val_accuracy: 0.5411 - lr: 0.0010
Epoch 10/100
1561/1563 [============================>.] - ETA: 0s - loss: 2.0567 - accuracy: 0.4659
Epoch 10: val_accuracy improved from 0.54110 to 0.55060, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 2.0568 - accuracy: 0.4659 - val_loss: 1.7252 - val_accuracy: 0.5506 - lr: 0.0010
Epoch 11/100
1561/1563 [============================>.] - ETA: 0s - loss: 2.0150 - accuracy: 0.4766
Epoch 11: val_accuracy did not improve from 0.55060
1563/1563 [==============================] - 46s 29ms/step - loss: 2.0149 - accuracy: 0.4766 - val_loss: 1.7529 - val_accuracy: 0.5464 - lr: 0.0010
Epoch 12/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.9893 - accuracy: 0.4815
Epoch 12: val_accuracy improved from 0.55060 to 0.55980, saving model to fine_final.h5
1563/1563 [==============================] - 46s 29ms/step - loss: 1.9894 - accuracy: 0.4814 - val_loss: 1.6968 - val_accuracy: 0.5598 - lr: 0.0010
Epoch 13/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.9597 - accuracy: 0.4891
Epoch 13: val_accuracy did not improve from 0.55980
1563/1563 [==============================] - 46s 29ms/step - loss: 1.9597 - accuracy: 0.4890 - val_loss: 1.6960 - val_accuracy: 0.5588 - lr: 0.0010
Epoch 14/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.9358 - accuracy: 0.4943
Epoch 14: val_accuracy did not improve from 0.55980
1563/1563 [==============================] - 46s 29ms/step - loss: 1.9357 - accuracy: 0.4943 - val_loss: 1.6944 - val_accuracy: 0.5587 - lr: 0.0010
Epoch 15/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.9139 - accuracy: 0.5002
Epoch 15: val_accuracy improved from 0.55980 to 0.56740, saving model to fine_final.h5
1563/1563 [==============================] - 46s 29ms/step - loss: 1.9138 - accuracy: 0.5002 - val_loss: 1.6878 - val_accuracy: 0.5674 - lr: 0.0010
Epoch 16/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.8947 - accuracy: 0.5057
Epoch 16: val_accuracy did not improve from 0.56740
1563/1563 [==============================] - 46s 29ms/step - loss: 1.8947 - accuracy: 0.5057 - val_loss: 1.7016 - val_accuracy: 0.5579 - lr: 0.0010
Epoch 17/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.8736 - accuracy: 0.5105
Epoch 17: val_accuracy improved from 0.56740 to 0.57250, saving model to fine_final.h5
1563/1563 [==============================] - 46s 29ms/step - loss: 1.8736 - accuracy: 0.5105 - val_loss: 1.6525 - val_accuracy: 0.5725 - lr: 0.0010
Epoch 18/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.8536 - accuracy: 0.5147
Epoch 18: val_accuracy did not improve from 0.57250
1563/1563 [==============================] - 46s 29ms/step - loss: 1.8536 - accuracy: 0.5147 - val_loss: 1.6802 - val_accuracy: 0.5663 - lr: 0.0010
Epoch 19/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.8383 - accuracy: 0.5187
Epoch 19: val_accuracy did not improve from 0.57250
1563/1563 [==============================] - 46s 29ms/step - loss: 1.8384 - accuracy: 0.5187 - val_loss: 1.6598 - val_accuracy: 0.5705 - lr: 0.0010
Epoch 20/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.8234 - accuracy: 0.5227
Epoch 20: val_accuracy did not improve from 0.57250
1563/1563 [==============================] - 46s 29ms/step - loss: 1.8234 - accuracy: 0.5227 - val_loss: 1.6600 - val_accuracy: 0.5715 - lr: 0.0010
Epoch 21/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.8069 - accuracy: 0.5266
Epoch 21: val_accuracy improved from 0.57250 to 0.57810, saving model to fine_final.h5
1563/1563 [==============================] - 46s 29ms/step - loss: 1.8069 - accuracy: 0.5266 - val_loss: 1.6613 - val_accuracy: 0.5781 - lr: 0.0010
Epoch 22/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.7967 - accuracy: 0.5289
Epoch 22: val_accuracy improved from 0.57810 to 0.57890, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 1.7967 - accuracy: 0.5290 - val_loss: 1.6281 - val_accuracy: 0.5789 - lr: 0.0010
Epoch 23/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.7813 - accuracy: 0.5331
Epoch 23: val_accuracy improved from 0.57890 to 0.58080, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 1.7812 - accuracy: 0.5332 - val_loss: 1.6168 - val_accuracy: 0.5808 - lr: 0.0010
Epoch 24/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.7696 - accuracy: 0.5348
Epoch 24: val_accuracy improved from 0.58080 to 0.59080, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 1.7697 - accuracy: 0.5348 - val_loss: 1.5951 - val_accuracy: 0.5908 - lr: 0.0010
Epoch 25/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.7573 - accuracy: 0.5388
Epoch 25: val_accuracy improved from 0.59080 to 0.59170, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 1.7573 - accuracy: 0.5388 - val_loss: 1.6114 - val_accuracy: 0.5917 - lr: 0.0010
Epoch 26/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.7465 - accuracy: 0.5412
Epoch 26: val_accuracy improved from 0.59170 to 0.59230, saving model to fine_final.h5
1563/1563 [==============================] - 46s 29ms/step - loss: 1.7464 - accuracy: 0.5412 - val_loss: 1.6046 - val_accuracy: 0.5923 - lr: 0.0010
Epoch 27/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.7368 - accuracy: 0.5441
Epoch 27: val_accuracy did not improve from 0.59230
1563/1563 [==============================] - 46s 29ms/step - loss: 1.7369 - accuracy: 0.5441 - val_loss: 1.6242 - val_accuracy: 0.5888 - lr: 0.0010
Epoch 28/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.7282 - accuracy: 0.5464
Epoch 28: val_accuracy did not improve from 0.59230
1563/1563 [==============================] - 46s 29ms/step - loss: 1.7283 - accuracy: 0.5464 - val_loss: 1.6777 - val_accuracy: 0.5750 - lr: 0.0010
Epoch 29/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.7149 - accuracy: 0.5490
Epoch 29: val_accuracy did not improve from 0.59230
1563/1563 [==============================] - 46s 29ms/step - loss: 1.7148 - accuracy: 0.5491 - val_loss: 1.6339 - val_accuracy: 0.5844 - lr: 0.0010
Epoch 30/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.7063 - accuracy: 0.5517
Epoch 30: val_accuracy did not improve from 0.59230
1563/1563 [==============================] - 46s 29ms/step - loss: 1.7061 - accuracy: 0.5517 - val_loss: 1.6349 - val_accuracy: 0.5841 - lr: 0.0010
Epoch 31/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.6980 - accuracy: 0.5534
Epoch 31: val_accuracy improved from 0.59230 to 0.59340, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 1.6980 - accuracy: 0.5533 - val_loss: 1.5877 - val_accuracy: 0.5934 - lr: 0.0010
Epoch 32/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.6920 - accuracy: 0.5549
Epoch 32: val_accuracy improved from 0.59340 to 0.59700, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 1.6919 - accuracy: 0.5549 - val_loss: 1.5813 - val_accuracy: 0.5970 - lr: 0.0010
Epoch 33/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.6813 - accuracy: 0.5578
Epoch 33: val_accuracy did not improve from 0.59700
1563/1563 [==============================] - 46s 29ms/step - loss: 1.6815 - accuracy: 0.5577 - val_loss: 1.6251 - val_accuracy: 0.5859 - lr: 0.0010
Epoch 34/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.6758 - accuracy: 0.5597
Epoch 34: val_accuracy improved from 0.59700 to 0.59960, saving model to fine_final.h5
1563/1563 [==============================] - 46s 29ms/step - loss: 1.6758 - accuracy: 0.5597 - val_loss: 1.5950 - val_accuracy: 0.5996 - lr: 0.0010
Epoch 35/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.6702 - accuracy: 0.5610
Epoch 35: val_accuracy did not improve from 0.59960
1563/1563 [==============================] - 46s 29ms/step - loss: 1.6702 - accuracy: 0.5610 - val_loss: 1.5879 - val_accuracy: 0.5968 - lr: 0.0010
Epoch 36/100
1563/1563 [==============================] - ETA: 0s - loss: 1.6594 - accuracy: 0.5633
Epoch 36: val_accuracy did not improve from 0.59960
1563/1563 [==============================] - 46s 29ms/step - loss: 1.6594 - accuracy: 0.5633 - val_loss: 1.6332 - val_accuracy: 0.5897 - lr: 0.0010
Epoch 37/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.6521 - accuracy: 0.5656
Epoch 37: val_accuracy did not improve from 0.59960
1563/1563 [==============================] - 46s 29ms/step - loss: 1.6522 - accuracy: 0.5656 - val_loss: 1.6127 - val_accuracy: 0.5912 - lr: 0.0010
Epoch 38/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.6456 - accuracy: 0.5674
Epoch 38: val_accuracy did not improve from 0.59960
1563/1563 [==============================] - 46s 29ms/step - loss: 1.6456 - accuracy: 0.5674 - val_loss: 1.6120 - val_accuracy: 0.5937 - lr: 0.0010
Epoch 39/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.6373 - accuracy: 0.5691
Epoch 39: val_accuracy did not improve from 0.59960
1563/1563 [==============================] - 46s 29ms/step - loss: 1.6372 - accuracy: 0.5691 - val_loss: 1.6047 - val_accuracy: 0.5985 - lr: 0.0010
Epoch 40/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.6347 - accuracy: 0.5695
Epoch 40: val_accuracy did not improve from 0.59960
1563/1563 [==============================] - 46s 29ms/step - loss: 1.6347 - accuracy: 0.5696 - val_loss: 1.6020 - val_accuracy: 0.5958 - lr: 0.0010
Epoch 41/100
1563/1563 [==============================] - ETA: 0s - loss: 1.6273 - accuracy: 0.5719
Epoch 41: val_accuracy did not improve from 0.59960
1563/1563 [==============================] - 46s 29ms/step - loss: 1.6273 - accuracy: 0.5719 - val_loss: 1.6109 - val_accuracy: 0.5903 - lr: 0.0010
Epoch 42/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.6188 - accuracy: 0.5738
Epoch 42: val_accuracy improved from 0.59960 to 0.59990, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 1.6189 - accuracy: 0.5738 - val_loss: 1.5898 - val_accuracy: 0.5999 - lr: 0.0010
Epoch 43/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.6128 - accuracy: 0.5761
Epoch 43: val_accuracy did not improve from 0.59990
1563/1563 [==============================] - 46s 29ms/step - loss: 1.6128 - accuracy: 0.5761 - val_loss: 1.6137 - val_accuracy: 0.5921 - lr: 0.0010
Epoch 44/100
1563/1563 [==============================] - ETA: 0s - loss: 1.6111 - accuracy: 0.5760
Epoch 44: val_accuracy improved from 0.59990 to 0.60390, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 1.6111 - accuracy: 0.5760 - val_loss: 1.5821 - val_accuracy: 0.6039 - lr: 0.0010
Epoch 45/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5997 - accuracy: 0.5781
Epoch 45: val_accuracy did not improve from 0.60390
1563/1563 [==============================] - 46s 29ms/step - loss: 1.5996 - accuracy: 0.5781 - val_loss: 1.6105 - val_accuracy: 0.5957 - lr: 0.0010
Epoch 46/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5955 - accuracy: 0.5800
Epoch 46: val_accuracy did not improve from 0.60390
1563/1563 [==============================] - 46s 29ms/step - loss: 1.5955 - accuracy: 0.5800 - val_loss: 1.6019 - val_accuracy: 0.6020 - lr: 0.0010
Epoch 47/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5946 - accuracy: 0.5802
Epoch 47: val_accuracy did not improve from 0.60390
1563/1563 [==============================] - 46s 29ms/step - loss: 1.5945 - accuracy: 0.5802 - val_loss: 1.6074 - val_accuracy: 0.6029 - lr: 0.0010
Epoch 48/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5859 - accuracy: 0.5819
Epoch 48: val_accuracy improved from 0.60390 to 0.60590, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 1.5859 - accuracy: 0.5819 - val_loss: 1.5852 - val_accuracy: 0.6059 - lr: 0.0010
Epoch 49/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5838 - accuracy: 0.5825
Epoch 49: val_accuracy did not improve from 0.60590
1563/1563 [==============================] - 46s 29ms/step - loss: 1.5838 - accuracy: 0.5825 - val_loss: 1.5979 - val_accuracy: 0.6027 - lr: 0.0010
Epoch 50/100
1563/1563 [==============================] - ETA: 0s - loss: 1.5764 - accuracy: 0.5852
Epoch 50: val_accuracy did not improve from 0.60590
1563/1563 [==============================] - 46s 29ms/step - loss: 1.5764 - accuracy: 0.5852 - val_loss: 1.5939 - val_accuracy: 0.6013 - lr: 0.0010
Epoch 51/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5689 - accuracy: 0.5868
Epoch 51: val_accuracy did not improve from 0.60590
1563/1563 [==============================] - 46s 29ms/step - loss: 1.5688 - accuracy: 0.5869 - val_loss: 1.6071 - val_accuracy: 0.5975 - lr: 0.0010
Epoch 52/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5673 - accuracy: 0.5866
Epoch 52: val_accuracy did not improve from 0.60590
1563/1563 [==============================] - 46s 29ms/step - loss: 1.5674 - accuracy: 0.5866 - val_loss: 1.5772 - val_accuracy: 0.6040 - lr: 0.0010
Epoch 53/100
1563/1563 [==============================] - ETA: 0s - loss: 1.5627 - accuracy: 0.5876
Epoch 53: val_accuracy did not improve from 0.60590
1563/1563 [==============================] - 46s 29ms/step - loss: 1.5627 - accuracy: 0.5876 - val_loss: 1.5947 - val_accuracy: 0.6055 - lr: 0.0010
Epoch 54/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5572 - accuracy: 0.5885
Epoch 54: val_accuracy did not improve from 0.60590
1563/1563 [==============================] - 46s 29ms/step - loss: 1.5573 - accuracy: 0.5884 - val_loss: 1.6079 - val_accuracy: 0.6010 - lr: 0.0010
Epoch 55/100
1563/1563 [==============================] - ETA: 0s - loss: 1.5522 - accuracy: 0.5901
Epoch 55: val_accuracy improved from 0.60590 to 0.60760, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 1.5522 - accuracy: 0.5901 - val_loss: 1.5861 - val_accuracy: 0.6076 - lr: 0.0010
Epoch 56/100
1562/1563 [============================>.] - ETA: 0s - loss: 1.5480 - accuracy: 0.5913
Epoch 56: val_accuracy improved from 0.60760 to 0.61120, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 1.5480 - accuracy: 0.5913 - val_loss: 1.5773 - val_accuracy: 0.6112 - lr: 0.0010
Epoch 57/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5442 - accuracy: 0.5933
Epoch 57: val_accuracy improved from 0.61120 to 0.61130, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 1.5444 - accuracy: 0.5933 - val_loss: 1.5793 - val_accuracy: 0.6113 - lr: 0.0010
Epoch 58/100
1563/1563 [==============================] - ETA: 0s - loss: 1.5397 - accuracy: 0.5932
Epoch 58: val_accuracy did not improve from 0.61130
1563/1563 [==============================] - 46s 29ms/step - loss: 1.5397 - accuracy: 0.5932 - val_loss: 1.6105 - val_accuracy: 0.6022 - lr: 0.0010
Epoch 59/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5371 - accuracy: 0.5933
Epoch 59: val_accuracy did not improve from 0.61130
1563/1563 [==============================] - 46s 29ms/step - loss: 1.5372 - accuracy: 0.5933 - val_loss: 1.5921 - val_accuracy: 0.6075 - lr: 0.0010
Epoch 60/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5282 - accuracy: 0.5967
Epoch 60: val_accuracy did not improve from 0.61130
1563/1563 [==============================] - 46s 29ms/step - loss: 1.5282 - accuracy: 0.5966 - val_loss: 1.6026 - val_accuracy: 0.6037 - lr: 0.0010
Epoch 61/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5244 - accuracy: 0.5975
Epoch 61: val_accuracy did not improve from 0.61130
1563/1563 [==============================] - 46s 29ms/step - loss: 1.5245 - accuracy: 0.5975 - val_loss: 1.5691 - val_accuracy: 0.6111 - lr: 0.0010
Epoch 62/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5201 - accuracy: 0.5986
Epoch 62: val_accuracy did not improve from 0.61130
1563/1563 [==============================] - 46s 29ms/step - loss: 1.5202 - accuracy: 0.5986 - val_loss: 1.6535 - val_accuracy: 0.5947 - lr: 0.0010
Epoch 63/100
1563/1563 [==============================] - ETA: 0s - loss: 1.5190 - accuracy: 0.5988
Epoch 63: val_accuracy did not improve from 0.61130
1563/1563 [==============================] - 46s 29ms/step - loss: 1.5190 - accuracy: 0.5988 - val_loss: 1.5905 - val_accuracy: 0.6066 - lr: 0.0010
Epoch 64/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5144 - accuracy: 0.5999
Epoch 64: val_accuracy improved from 0.61130 to 0.61180, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 1.5144 - accuracy: 0.5999 - val_loss: 1.5758 - val_accuracy: 0.6118 - lr: 0.0010
Epoch 65/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5120 - accuracy: 0.6004
Epoch 65: val_accuracy did not improve from 0.61180
1563/1563 [==============================] - 46s 29ms/step - loss: 1.5120 - accuracy: 0.6004 - val_loss: 1.5999 - val_accuracy: 0.6093 - lr: 0.0010
Epoch 66/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5080 - accuracy: 0.6016
Epoch 66: val_accuracy improved from 0.61180 to 0.61400, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 1.5080 - accuracy: 0.6016 - val_loss: 1.5639 - val_accuracy: 0.6140 - lr: 0.0010
Epoch 67/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.5054 - accuracy: 0.6024
Epoch 67: val_accuracy did not improve from 0.61400
1563/1563 [==============================] - 46s 30ms/step - loss: 1.5053 - accuracy: 0.6024 - val_loss: 1.5873 - val_accuracy: 0.6100 - lr: 0.0010
Epoch 68/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4983 - accuracy: 0.6046
Epoch 68: val_accuracy did not improve from 0.61400
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4983 - accuracy: 0.6046 - val_loss: 1.5835 - val_accuracy: 0.6078 - lr: 0.0010
Epoch 69/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4975 - accuracy: 0.6045
Epoch 69: val_accuracy did not improve from 0.61400
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4974 - accuracy: 0.6046 - val_loss: 1.6045 - val_accuracy: 0.6066 - lr: 0.0010
Epoch 70/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4938 - accuracy: 0.6056
Epoch 70: val_accuracy improved from 0.61400 to 0.61660, saving model to fine_final.h5
1563/1563 [==============================] - 46s 30ms/step - loss: 1.4938 - accuracy: 0.6056 - val_loss: 1.5660 - val_accuracy: 0.6166 - lr: 0.0010
Epoch 71/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4924 - accuracy: 0.6056
Epoch 71: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4923 - accuracy: 0.6056 - val_loss: 1.5885 - val_accuracy: 0.6111 - lr: 0.0010
Epoch 72/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4875 - accuracy: 0.6066
Epoch 72: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4874 - accuracy: 0.6066 - val_loss: 1.5896 - val_accuracy: 0.6029 - lr: 0.0010
Epoch 73/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4855 - accuracy: 0.6075
Epoch 73: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4856 - accuracy: 0.6075 - val_loss: 1.6200 - val_accuracy: 0.6075 - lr: 0.0010
Epoch 74/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4869 - accuracy: 0.6070
Epoch 74: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4868 - accuracy: 0.6070 - val_loss: 1.5546 - val_accuracy: 0.6157 - lr: 0.0010
Epoch 75/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4787 - accuracy: 0.6086
Epoch 75: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4787 - accuracy: 0.6086 - val_loss: 1.6001 - val_accuracy: 0.6085 - lr: 0.0010
Epoch 76/100
1562/1563 [============================>.] - ETA: 0s - loss: 1.4757 - accuracy: 0.6110
Epoch 76: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 30ms/step - loss: 1.4757 - accuracy: 0.6110 - val_loss: 1.5854 - val_accuracy: 0.6132 - lr: 0.0010
Epoch 77/100
1562/1563 [============================>.] - ETA: 0s - loss: 1.4738 - accuracy: 0.6109
Epoch 77: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 30ms/step - loss: 1.4739 - accuracy: 0.6109 - val_loss: 1.5530 - val_accuracy: 0.6160 - lr: 0.0010
Epoch 78/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4675 - accuracy: 0.6114
Epoch 78: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4677 - accuracy: 0.6114 - val_loss: 1.6077 - val_accuracy: 0.6091 - lr: 0.0010
Epoch 79/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4671 - accuracy: 0.6129
Epoch 79: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4672 - accuracy: 0.6128 - val_loss: 1.6050 - val_accuracy: 0.6072 - lr: 0.0010
Epoch 80/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4667 - accuracy: 0.6122
Epoch 80: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4667 - accuracy: 0.6122 - val_loss: 1.5875 - val_accuracy: 0.6102 - lr: 0.0010
Epoch 81/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4617 - accuracy: 0.6133
Epoch 81: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4616 - accuracy: 0.6133 - val_loss: 1.5998 - val_accuracy: 0.6114 - lr: 0.0010
Epoch 82/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4622 - accuracy: 0.6135
Epoch 82: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4622 - accuracy: 0.6135 - val_loss: 1.5949 - val_accuracy: 0.6061 - lr: 0.0010
Epoch 83/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4580 - accuracy: 0.6143
Epoch 83: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4582 - accuracy: 0.6143 - val_loss: 1.6083 - val_accuracy: 0.6063 - lr: 0.0010
Epoch 84/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4565 - accuracy: 0.6151
Epoch 84: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4566 - accuracy: 0.6151 - val_loss: 1.6181 - val_accuracy: 0.6013 - lr: 0.0010
Epoch 85/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4523 - accuracy: 0.6149
Epoch 85: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4524 - accuracy: 0.6149 - val_loss: 1.5865 - val_accuracy: 0.6153 - lr: 0.0010
Epoch 86/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4509 - accuracy: 0.6168
Epoch 86: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4508 - accuracy: 0.6168 - val_loss: 1.5861 - val_accuracy: 0.6088 - lr: 0.0010
Epoch 87/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4499 - accuracy: 0.6162
Epoch 87: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4499 - accuracy: 0.6162 - val_loss: 1.6167 - val_accuracy: 0.6087 - lr: 0.0010
Epoch 88/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4438 - accuracy: 0.6184
Epoch 88: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4438 - accuracy: 0.6184 - val_loss: 1.5712 - val_accuracy: 0.6153 - lr: 0.0010
Epoch 89/100
1562/1563 [============================>.] - ETA: 0s - loss: 1.4445 - accuracy: 0.6175
Epoch 89: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4445 - accuracy: 0.6175 - val_loss: 1.6270 - val_accuracy: 0.6031 - lr: 0.0010
Epoch 90/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4433 - accuracy: 0.6176
Epoch 90: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4434 - accuracy: 0.6176 - val_loss: 1.5859 - val_accuracy: 0.6083 - lr: 0.0010
Epoch 91/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4350 - accuracy: 0.6207
Epoch 91: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 30ms/step - loss: 1.4350 - accuracy: 0.6207 - val_loss: 1.5929 - val_accuracy: 0.6102 - lr: 0.0010
Epoch 92/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4372 - accuracy: 0.6199
Epoch 92: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4372 - accuracy: 0.6199 - val_loss: 1.6102 - val_accuracy: 0.6097 - lr: 0.0010
Epoch 93/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4339 - accuracy: 0.6210
Epoch 93: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4341 - accuracy: 0.6210 - val_loss: 1.6134 - val_accuracy: 0.6059 - lr: 0.0010
Epoch 94/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4317 - accuracy: 0.6203
Epoch 94: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 30ms/step - loss: 1.4318 - accuracy: 0.6203 - val_loss: 1.6036 - val_accuracy: 0.6043 - lr: 0.0010
Epoch 95/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4284 - accuracy: 0.6220
Epoch 95: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4285 - accuracy: 0.6220 - val_loss: 1.6196 - val_accuracy: 0.6029 - lr: 0.0010
Epoch 96/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4268 - accuracy: 0.6218
Epoch 96: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4268 - accuracy: 0.6218 - val_loss: 1.5973 - val_accuracy: 0.6100 - lr: 0.0010
Epoch 97/100
1561/1563 [============================>.] - ETA: 0s - loss: 1.4279 - accuracy: 0.6216
Epoch 97: val_accuracy did not improve from 0.61660
1563/1563 [==============================] - 46s 29ms/step - loss: 1.4277 - accuracy: 0.6217 - val_loss: 1.5627 - val_accuracy: 0.6139 - lr: 0.0010
Epoch 97: early stopping
visualkeras.layered_view(model,legend=True)
import efficientnet.keras as efn
from keras.layers import GlobalAveragePooling2D
efnb0 = efn.EfficientNetB0(weights='imagenet', include_top=False, input_shape=(32,32,3), classes=100)
model = Sequential()
model.add(efnb0)
model.add(GlobalAveragePooling2D())
model.add(Dropout(0.5))
model.add(Dense(100, activation='softmax'))
model.summary()
Downloading data from https://github.com/Callidior/keras-applications/releases/download/efficientnet/efficientnet-b0_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5
16804768/16804768 [==============================] - 2s 0us/step
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
efficientnet-b0 (Functional (None, 1, 1, 1280) 4049564
)
global_average_pooling2d (G (None, 1280) 0
lobalAveragePooling2D)
dropout_4 (Dropout) (None, 1280) 0
dense_2 (Dense) (None, 100) 128100
=================================================================
Total params: 4,177,664
Trainable params: 4,135,648
Non-trainable params: 42,016
_________________________________________________________________
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
mc = ModelCheckpoint('efficientnetB0.h5', monitor='val_accuracy', mode='max', verbose=1, save_best_only=True)
model.fit(X_train, y_train, epochs=100, validation_data=(X_val, y_val), callbacks=[early_stopping,reduce_lr,mc])
test_loss,test_acc=model.evaluate(X_test, y_test, verbose=0)
print('Test accuracy:', test_acc)
Epoch 1/100 1250/1250 [==============================] - ETA: 0s - loss: 4.4852 - accuracy: 0.0763 Epoch 1: val_accuracy improved from -inf to 0.20820, saving model to efficientnetB0.h5 1250/1250 [==============================] - 68s 50ms/step - loss: 4.4852 - accuracy: 0.0763 - val_loss: 3.4620 - val_accuracy: 0.2082 - lr: 0.0100 Epoch 2/100 1250/1250 [==============================] - ETA: 0s - loss: 3.3308 - accuracy: 0.2153 Epoch 2: val_accuracy improved from 0.20820 to 0.32630, saving model to efficientnetB0.h5 1250/1250 [==============================] - 61s 49ms/step - loss: 3.3308 - accuracy: 0.2153 - val_loss: 2.7290 - val_accuracy: 0.3263 - lr: 0.0100 Epoch 3/100 1249/1250 [============================>.] - ETA: 0s - loss: 2.7983 - accuracy: 0.3032 Epoch 3: val_accuracy improved from 0.32630 to 0.38890, saving model to efficientnetB0.h5 1250/1250 [==============================] - 61s 48ms/step - loss: 2.7979 - accuracy: 0.3034 - val_loss: 2.4184 - val_accuracy: 0.3889 - lr: 0.0100 Epoch 4/100 1249/1250 [============================>.] - ETA: 0s - loss: 2.4923 - accuracy: 0.3597 Epoch 4: val_accuracy improved from 0.38890 to 0.44130, saving model to efficientnetB0.h5 1250/1250 [==============================] - 61s 48ms/step - loss: 2.4920 - accuracy: 0.3597 - val_loss: 2.1391 - val_accuracy: 0.4413 - lr: 0.0100 Epoch 5/100 1249/1250 [============================>.] - ETA: 0s - loss: 2.2859 - accuracy: 0.4032 Epoch 5: val_accuracy improved from 0.44130 to 0.46300, saving model to efficientnetB0.h5 1250/1250 [==============================] - 61s 48ms/step - loss: 2.2858 - accuracy: 0.4031 - val_loss: 2.0184 - val_accuracy: 0.4630 - lr: 0.0100 Epoch 6/100 1250/1250 [==============================] - ETA: 0s - loss: 2.1423 - accuracy: 0.4331 Epoch 6: val_accuracy improved from 0.46300 to 0.48790, saving model to efficientnetB0.h5 1250/1250 [==============================] - 60s 48ms/step - loss: 2.1423 - accuracy: 0.4331 - val_loss: 1.9229 - val_accuracy: 0.4879 - lr: 0.0100 Epoch 7/100 1249/1250 [============================>.] - ETA: 0s - loss: 2.0019 - accuracy: 0.4599 Epoch 7: val_accuracy improved from 0.48790 to 0.50240, saving model to efficientnetB0.h5 1250/1250 [==============================] - 61s 49ms/step - loss: 2.0016 - accuracy: 0.4600 - val_loss: 1.8596 - val_accuracy: 0.5024 - lr: 0.0100 Epoch 8/100 1250/1250 [==============================] - ETA: 0s - loss: 1.8974 - accuracy: 0.4863 Epoch 8: val_accuracy improved from 0.50240 to 0.51440, saving model to efficientnetB0.h5 1250/1250 [==============================] - 61s 49ms/step - loss: 1.8974 - accuracy: 0.4863 - val_loss: 1.7904 - val_accuracy: 0.5144 - lr: 0.0100 Epoch 9/100 1249/1250 [============================>.] - ETA: 0s - loss: 1.8078 - accuracy: 0.5081 Epoch 9: val_accuracy improved from 0.51440 to 0.52280, saving model to efficientnetB0.h5 1250/1250 [==============================] - 60s 48ms/step - loss: 1.8080 - accuracy: 0.5081 - val_loss: 1.7420 - val_accuracy: 0.5228 - lr: 0.0100 Epoch 10/100 1249/1250 [============================>.] - ETA: 0s - loss: 1.7305 - accuracy: 0.5239 Epoch 10: val_accuracy improved from 0.52280 to 0.53110, saving model to efficientnetB0.h5 1250/1250 [==============================] - 62s 50ms/step - loss: 1.7305 - accuracy: 0.5239 - val_loss: 1.7143 - val_accuracy: 0.5311 - lr: 0.0100 Epoch 11/100 1250/1250 [==============================] - ETA: 0s - loss: 1.6551 - accuracy: 0.5426 Epoch 11: val_accuracy improved from 0.53110 to 0.53320, saving model to efficientnetB0.h5 1250/1250 [==============================] - 61s 49ms/step - loss: 1.6551 - accuracy: 0.5426 - val_loss: 1.6964 - val_accuracy: 0.5332 - lr: 0.0100 Epoch 12/100 1250/1250 [==============================] - ETA: 0s - loss: 1.5964 - accuracy: 0.5548 Epoch 12: val_accuracy improved from 0.53320 to 0.54260, saving model to efficientnetB0.h5 1250/1250 [==============================] - 60s 48ms/step - loss: 1.5964 - accuracy: 0.5548 - val_loss: 1.6614 - val_accuracy: 0.5426 - lr: 0.0100 Epoch 13/100 1250/1250 [==============================] - ETA: 0s - loss: 1.5222 - accuracy: 0.5738 Epoch 13: val_accuracy did not improve from 0.54260 1250/1250 [==============================] - 60s 48ms/step - loss: 1.5222 - accuracy: 0.5738 - val_loss: 1.7388 - val_accuracy: 0.5296 - lr: 0.0100 Epoch 14/100 1250/1250 [==============================] - ETA: 0s - loss: 1.4651 - accuracy: 0.5852 Epoch 14: val_accuracy improved from 0.54260 to 0.55600, saving model to efficientnetB0.h5 1250/1250 [==============================] - 61s 48ms/step - loss: 1.4651 - accuracy: 0.5852 - val_loss: 1.6307 - val_accuracy: 0.5560 - lr: 0.0100 Epoch 15/100 1250/1250 [==============================] - ETA: 0s - loss: 1.4219 - accuracy: 0.5974 Epoch 15: val_accuracy improved from 0.55600 to 0.55620, saving model to efficientnetB0.h5 1250/1250 [==============================] - 60s 48ms/step - loss: 1.4219 - accuracy: 0.5974 - val_loss: 1.6278 - val_accuracy: 0.5562 - lr: 0.0100 Epoch 16/100 1250/1250 [==============================] - ETA: 0s - loss: 1.3702 - accuracy: 0.6090 Epoch 16: val_accuracy improved from 0.55620 to 0.55680, saving model to efficientnetB0.h5 1250/1250 [==============================] - 60s 48ms/step - loss: 1.3702 - accuracy: 0.6090 - val_loss: 1.6261 - val_accuracy: 0.5568 - lr: 0.0100 Epoch 17/100 1250/1250 [==============================] - ETA: 0s - loss: 1.3188 - accuracy: 0.6202 Epoch 17: val_accuracy improved from 0.55680 to 0.55820, saving model to efficientnetB0.h5 1250/1250 [==============================] - 60s 48ms/step - loss: 1.3188 - accuracy: 0.6202 - val_loss: 1.6155 - val_accuracy: 0.5582 - lr: 0.0100 Epoch 18/100 1249/1250 [============================>.] - ETA: 0s - loss: 1.2923 - accuracy: 0.6272 Epoch 18: val_accuracy improved from 0.55820 to 0.56300, saving model to efficientnetB0.h5 1250/1250 [==============================] - 62s 49ms/step - loss: 1.2924 - accuracy: 0.6272 - val_loss: 1.5990 - val_accuracy: 0.5630 - lr: 0.0100 Epoch 19/100 1250/1250 [==============================] - ETA: 0s - loss: 1.2342 - accuracy: 0.6424 Epoch 19: val_accuracy improved from 0.56300 to 0.56610, saving model to efficientnetB0.h5 1250/1250 [==============================] - 65s 52ms/step - loss: 1.2342 - accuracy: 0.6424 - val_loss: 1.6066 - val_accuracy: 0.5661 - lr: 0.0100 Epoch 20/100 1249/1250 [============================>.] - ETA: 0s - loss: 1.2028 - accuracy: 0.6496 Epoch 20: val_accuracy did not improve from 0.56610 1250/1250 [==============================] - 63s 51ms/step - loss: 1.2029 - accuracy: 0.6496 - val_loss: 1.5962 - val_accuracy: 0.5654 - lr: 0.0100 Epoch 21/100 1249/1250 [============================>.] - ETA: 0s - loss: 1.1626 - accuracy: 0.6589 Epoch 21: val_accuracy improved from 0.56610 to 0.57370, saving model to efficientnetB0.h5 1250/1250 [==============================] - 61s 49ms/step - loss: 1.1625 - accuracy: 0.6589 - val_loss: 1.5868 - val_accuracy: 0.5737 - lr: 0.0100 Epoch 22/100 1249/1250 [============================>.] - ETA: 0s - loss: 1.1224 - accuracy: 0.6711 Epoch 22: val_accuracy did not improve from 0.57370 1250/1250 [==============================] - 63s 50ms/step - loss: 1.1222 - accuracy: 0.6711 - val_loss: 1.5892 - val_accuracy: 0.5726 - lr: 0.0100 Epoch 23/100 1249/1250 [============================>.] - ETA: 0s - loss: 1.0811 - accuracy: 0.6808 Epoch 23: val_accuracy improved from 0.57370 to 0.57530, saving model to efficientnetB0.h5 1250/1250 [==============================] - 62s 50ms/step - loss: 1.0810 - accuracy: 0.6808 - val_loss: 1.5981 - val_accuracy: 0.5753 - lr: 0.0100 Epoch 24/100 1249/1250 [============================>.] - ETA: 0s - loss: 1.0435 - accuracy: 0.6885 Epoch 24: val_accuracy did not improve from 0.57530 1250/1250 [==============================] - 61s 49ms/step - loss: 1.0435 - accuracy: 0.6884 - val_loss: 1.6121 - val_accuracy: 0.5732 - lr: 0.0100 Epoch 25/100 1250/1250 [==============================] - ETA: 0s - loss: 1.0111 - accuracy: 0.7009 Epoch 25: val_accuracy did not improve from 0.57530 1250/1250 [==============================] - 61s 49ms/step - loss: 1.0111 - accuracy: 0.7009 - val_loss: 1.6091 - val_accuracy: 0.5714 - lr: 0.0100 Epoch 26/100 1250/1250 [==============================] - ETA: 0s - loss: 0.9894 - accuracy: 0.7074 Epoch 26: val_accuracy did not improve from 0.57530 1250/1250 [==============================] - 61s 49ms/step - loss: 0.9894 - accuracy: 0.7074 - val_loss: 1.6411 - val_accuracy: 0.5715 - lr: 0.0100 Epoch 27/100 1249/1250 [============================>.] - ETA: 0s - loss: 0.9015 - accuracy: 0.7300 Epoch 27: val_accuracy improved from 0.57530 to 0.58240, saving model to efficientnetB0.h5 1250/1250 [==============================] - 61s 49ms/step - loss: 0.9014 - accuracy: 0.7301 - val_loss: 1.5788 - val_accuracy: 0.5824 - lr: 0.0020 Epoch 28/100 1250/1250 [==============================] - ETA: 0s - loss: 0.8705 - accuracy: 0.7379 Epoch 28: val_accuracy improved from 0.58240 to 0.58380, saving model to efficientnetB0.h5 1250/1250 [==============================] - 61s 49ms/step - loss: 0.8705 - accuracy: 0.7379 - val_loss: 1.5810 - val_accuracy: 0.5838 - lr: 0.0020 Epoch 29/100 1250/1250 [==============================] - ETA: 0s - loss: 0.8564 - accuracy: 0.7406 Epoch 29: val_accuracy improved from 0.58380 to 0.58420, saving model to efficientnetB0.h5 1250/1250 [==============================] - 62s 49ms/step - loss: 0.8564 - accuracy: 0.7406 - val_loss: 1.5814 - val_accuracy: 0.5842 - lr: 0.0020 Epoch 30/100 1250/1250 [==============================] - ETA: 0s - loss: 0.8495 - accuracy: 0.7443 Epoch 30: val_accuracy did not improve from 0.58420 1250/1250 [==============================] - 70s 56ms/step - loss: 0.8495 - accuracy: 0.7443 - val_loss: 1.5771 - val_accuracy: 0.5840 - lr: 0.0020 Epoch 31/100 1250/1250 [==============================] - ETA: 0s - loss: 0.8464 - accuracy: 0.7436 Epoch 31: val_accuracy improved from 0.58420 to 0.58470, saving model to efficientnetB0.h5 1250/1250 [==============================] - 71s 57ms/step - loss: 0.8464 - accuracy: 0.7436 - val_loss: 1.5825 - val_accuracy: 0.5847 - lr: 0.0020 Epoch 32/100 1250/1250 [==============================] - ETA: 0s - loss: 0.8279 - accuracy: 0.7502 Epoch 32: val_accuracy improved from 0.58470 to 0.58620, saving model to efficientnetB0.h5 1250/1250 [==============================] - 68s 54ms/step - loss: 0.8279 - accuracy: 0.7502 - val_loss: 1.5768 - val_accuracy: 0.5862 - lr: 0.0020 Epoch 33/100 1250/1250 [==============================] - ETA: 0s - loss: 0.8112 - accuracy: 0.7537 Epoch 33: val_accuracy improved from 0.58620 to 0.58670, saving model to efficientnetB0.h5 1250/1250 [==============================] - 70s 56ms/step - loss: 0.8112 - accuracy: 0.7537 - val_loss: 1.5841 - val_accuracy: 0.5867 - lr: 0.0020 Epoch 34/100 1250/1250 [==============================] - ETA: 0s - loss: 0.8132 - accuracy: 0.7556 Epoch 34: val_accuracy did not improve from 0.58670 1250/1250 [==============================] - 69s 55ms/step - loss: 0.8132 - accuracy: 0.7556 - val_loss: 1.5868 - val_accuracy: 0.5828 - lr: 0.0020 Epoch 35/100 1250/1250 [==============================] - ETA: 0s - loss: 0.8001 - accuracy: 0.7597 Epoch 35: val_accuracy did not improve from 0.58670 1250/1250 [==============================] - 70s 56ms/step - loss: 0.8001 - accuracy: 0.7597 - val_loss: 1.5921 - val_accuracy: 0.5847 - lr: 0.0020 Epoch 36/100 1250/1250 [==============================] - ETA: 0s - loss: 0.7879 - accuracy: 0.7616 Epoch 36: val_accuracy improved from 0.58670 to 0.58740, saving model to efficientnetB0.h5 1250/1250 [==============================] - 71s 57ms/step - loss: 0.7879 - accuracy: 0.7616 - val_loss: 1.5924 - val_accuracy: 0.5874 - lr: 0.0020 Epoch 37/100 1250/1250 [==============================] - ETA: 0s - loss: 0.7944 - accuracy: 0.7585 Epoch 37: val_accuracy did not improve from 0.58740 1250/1250 [==============================] - 71s 57ms/step - loss: 0.7944 - accuracy: 0.7585 - val_loss: 1.6001 - val_accuracy: 0.5831 - lr: 0.0020 Epoch 38/100 1250/1250 [==============================] - ETA: 0s - loss: 0.7698 - accuracy: 0.7679 Epoch 38: val_accuracy did not improve from 0.58740 1250/1250 [==============================] - 71s 57ms/step - loss: 0.7698 - accuracy: 0.7679 - val_loss: 1.5897 - val_accuracy: 0.5871 - lr: 0.0010 Epoch 39/100 1250/1250 [==============================] - ETA: 0s - loss: 0.7698 - accuracy: 0.7661 Epoch 39: val_accuracy did not improve from 0.58740 1250/1250 [==============================] - 72s 57ms/step - loss: 0.7698 - accuracy: 0.7661 - val_loss: 1.5934 - val_accuracy: 0.5851 - lr: 0.0010 Epoch 40/100 1250/1250 [==============================] - ETA: 0s - loss: 0.7645 - accuracy: 0.7679 Epoch 40: val_accuracy did not improve from 0.58740 1250/1250 [==============================] - 109s 87ms/step - loss: 0.7645 - accuracy: 0.7679 - val_loss: 1.6004 - val_accuracy: 0.5849 - lr: 0.0010 Epoch 41/100 1250/1250 [==============================] - ETA: 0s - loss: 0.7562 - accuracy: 0.7701 Epoch 41: val_accuracy improved from 0.58740 to 0.58800, saving model to efficientnetB0.h5 1250/1250 [==============================] - 85s 68ms/step - loss: 0.7562 - accuracy: 0.7701 - val_loss: 1.5969 - val_accuracy: 0.5880 - lr: 0.0010 Epoch 42/100 1250/1250 [==============================] - ETA: 0s - loss: 0.7495 - accuracy: 0.7723 Epoch 42: val_accuracy did not improve from 0.58800 1250/1250 [==============================] - 78s 63ms/step - loss: 0.7495 - accuracy: 0.7723 - val_loss: 1.6009 - val_accuracy: 0.5852 - lr: 0.0010 Epoch 42: early stopping Test accuracy: 0.5906999707221985
# load coarse label model
model = load_model('fine_final.h5')
loss,acc = model.evaluate(X_test,y_test)
print("Fine model, accuracy: {:5.2f}%".format(100*acc))
print("Fine model, loss:", loss)
313/313 [==============================] - 2s 6ms/step - loss: 1.5238 - accuracy: 0.6188 Fine model, accuracy: 61.88% Fine model, loss: 1.5237715244293213
# import cifar 10
(X_train, y_train), (X_test, y_test) = cifar100.load_data(label_mode="coarse")
assert X_train.shape == (50000, 32, 32, 3)
assert X_test.shape == (10000, 32, 32, 3)
assert y_train.shape == (50000, 1)
assert y_test.shape == (10000, 1)
# split train and validation set
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42, stratify=y_train)
print("X_train shape:", X_train.shape, "y_train shape:", y_train.shape)
print("X_test shape:", X_test.shape, "y_test shape:", y_test.shape)
# print unique classes count
print("Unique classes count:", len(np.unique(y_train)))
# print number of samples in each class
y_train_df = pd.DataFrame(y_train)
print(y_train_df[0].value_counts())
y_test_label = y_test
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
y_val = to_categorical(y_val)
print('after',y_train.shape,y_test.shape)
X_train shape: (40000, 32, 32, 3) y_train shape: (40000, 1) X_test shape: (10000, 32, 32, 3) y_test shape: (10000, 1) Unique classes count: 20 1 2000 18 2000 10 2000 5 2000 8 2000 6 2000 13 2000 7 2000 17 2000 15 2000 14 2000 4 2000 2 2000 3 2000 16 2000 11 2000 12 2000 0 2000 9 2000 19 2000 Name: 0, dtype: int64 after (40000, 20) (10000, 20)
plt.imshow(X_train[12])
plt.show()
X_train = X_train/255
X_test = X_test/255
X_val = X_val/255
X_train_augmented = X_train.copy()
y_train_augmented = y_train.copy()
X_train_aug = X_train.copy()
y_train_aug = y_train.copy()
# ImageDataGenerator rotation
datagen_rotate = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=90,
)
# ImageDataGenerator rotation
datagen_shear = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
shear_range=0.4
)
# ImageDataGenerator rotation
datagen_flip = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
horizontal_flip=True,
vertical_flip=True)
# ImageDataGenerator rotation
datagen_channel = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
channel_shift_range=0.5
)
datagen_rotate.fit(X_train_augmented)
X_train_augmented_rotate = datagen_rotate.flow(X_train_augmented,y_train_augmented, batch_size = X_train_augmented.shape[0], shuffle=False).next()
# change X_train_augmented to float32
X_train_augmented_rotate = X_train_augmented_rotate[0].astype('float32')
datagen_shear.fit(X_train_augmented)
X_train_augmented_shear = datagen_shear.flow(X_train_augmented,y_train_augmented, batch_size = X_train_augmented.shape[0], shuffle=False).next()
# change X_train_augmented to float32
X_train_augmented_shear = X_train_augmented_shear[0].astype('float32')
datagen_flip.fit(X_train_augmented)
X_train_augmented_flip = datagen_flip.flow(X_train_augmented,y_train_augmented, batch_size = X_train_augmented.shape[0], shuffle=False).next()
# change X_train_augmented to float32
X_train_augmented_flip = X_train_augmented_flip[0].astype('float32')
datagen_channel.fit(X_train_augmented)
X_train_augmented_channel = datagen_channel.flow(X_train_augmented,y_train_augmented, batch_size = X_train_augmented.shape[0], shuffle=False).next()
# change X_train_augmented to float32
X_train_augmented_channel = X_train_augmented_channel[0].astype('float32')
# concat the original and augmented data
X_train = np.concatenate((X_train,X_train_augmented_rotate),axis=0)
y_train = np.concatenate((y_train,y_train_augmented),axis=0)
# concat the original and augmented data
X_train = np.concatenate((X_train,X_train_augmented_shear),axis=0)
y_train = np.concatenate((y_train,y_train_augmented),axis=0)
# concat the original and augmented data
X_train = np.concatenate((X_train,X_train_augmented_flip),axis=0)
y_train = np.concatenate((y_train,y_train_augmented),axis=0)
# concat the original and augmented data
X_train = np.concatenate((X_train,X_train_augmented_channel),axis=0)
y_train = np.concatenate((y_train,y_train_augmented),axis=0)
plt.imshow(X_train[40012])
plt.show()
#names of each class
class_names = ['aquatic animal', 'fish','flowers','food_containers','fruits and vegetables','household electronic','household furniture','insects','large carnivores','large man made outdoor things','outdoor scenes','omnivores and herbivores','medium sized mammals','non-insect invertebrates','people','reptiles','small mammals','trees','vehicles 1','vehicles 2']
# plot the first 25 images in the training set and display the class name below each image
plt.figure(figsize=(10,10))
for i in range(20):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(X_train[i], cmap=plt.cm.binary)
plt.xlabel(class_names[np.argmax(y_train[i])])
plt.show()
model = Sequential()
# Convolutional Layer
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.3))
model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(3, 3)))
# Dropout layers
model.add(Dropout(0.35))
# Convolutional Layer
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(3, 3)))
# Dropout layers
model.add(Dropout(0.35))
# Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=256, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.3))
model.add(Flatten())
# model.add(Dropout(0.2))
model.add(Dense(128, activation='elu'))
model.add(Dropout(0.3))
model.add(Dense(20, activation='softmax',kernel_regularizer=tensorflow.keras.regularizers.L2(0.02)))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adamax',
metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
mc = ModelCheckpoint('coarselabel3.h5', monitor='val_accuracy', mode='max', verbose=1, save_best_only=True)
h_callback = model.fit(X_train, y_train, epochs = 100,validation_data=(X_val, y_val),batch_size=256,callbacks=[early_stopping,mc])
# Plot train vs test loss during training
plot_loss(h_callback.history['loss'], h_callback.history['val_loss'])
plot_accuracy(h_callback.history['accuracy'], h_callback.history['val_accuracy'])
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_10 (Conv2D) (None, 32, 32, 32) 896
batch_normalization_10 (Bat (None, 32, 32, 32) 128
chNormalization)
conv2d_11 (Conv2D) (None, 32, 32, 32) 9248
batch_normalization_11 (Bat (None, 32, 32, 32) 128
chNormalization)
max_pooling2d_4 (MaxPooling (None, 16, 16, 32) 0
2D)
dropout_5 (Dropout) (None, 16, 16, 32) 0
conv2d_12 (Conv2D) (None, 16, 16, 64) 18496
batch_normalization_12 (Bat (None, 16, 16, 64) 256
chNormalization)
conv2d_13 (Conv2D) (None, 16, 16, 64) 36928
batch_normalization_13 (Bat (None, 16, 16, 64) 256
chNormalization)
max_pooling2d_5 (MaxPooling (None, 8, 8, 64) 0
2D)
dropout_6 (Dropout) (None, 8, 8, 64) 0
conv2d_14 (Conv2D) (None, 8, 8, 128) 73856
batch_normalization_14 (Bat (None, 8, 8, 128) 512
chNormalization)
conv2d_15 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_15 (Bat (None, 8, 8, 128) 512
chNormalization)
conv2d_16 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_16 (Bat (None, 8, 8, 128) 512
chNormalization)
conv2d_17 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_17 (Bat (None, 8, 8, 128) 512
chNormalization)
max_pooling2d_6 (MaxPooling (None, 4, 4, 128) 0
2D)
dropout_7 (Dropout) (None, 4, 4, 128) 0
conv2d_18 (Conv2D) (None, 4, 4, 256) 295168
batch_normalization_18 (Bat (None, 4, 4, 256) 1024
chNormalization)
conv2d_19 (Conv2D) (None, 4, 4, 256) 590080
batch_normalization_19 (Bat (None, 4, 4, 256) 1024
chNormalization)
max_pooling2d_7 (MaxPooling (None, 2, 2, 256) 0
2D)
dropout_8 (Dropout) (None, 2, 2, 256) 0
flatten_1 (Flatten) (None, 1024) 0
dense_2 (Dense) (None, 128) 131200
dropout_9 (Dropout) (None, 128) 0
dense_3 (Dense) (None, 20) 2580
=================================================================
Total params: 1,606,068
Trainable params: 1,603,636
Non-trainable params: 2,432
_________________________________________________________________
Epoch 1/100
781/782 [============================>.] - ETA: 0s - loss: 2.8704 - accuracy: 0.2494
Epoch 1: val_accuracy improved from -inf to 0.36320, saving model to coarselabel2.h5
782/782 [==============================] - 35s 43ms/step - loss: 2.8702 - accuracy: 0.2494 - val_loss: 2.2395 - val_accuracy: 0.3632
Epoch 2/100
782/782 [==============================] - ETA: 0s - loss: 2.0677 - accuracy: 0.3903
Epoch 2: val_accuracy improved from 0.36320 to 0.44930, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 2.0677 - accuracy: 0.3903 - val_loss: 1.8233 - val_accuracy: 0.4493
Epoch 3/100
781/782 [============================>.] - ETA: 0s - loss: 1.7503 - accuracy: 0.4669
Epoch 3: val_accuracy improved from 0.44930 to 0.54100, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 1.7502 - accuracy: 0.4670 - val_loss: 1.4884 - val_accuracy: 0.5410
Epoch 4/100
781/782 [============================>.] - ETA: 0s - loss: 1.5493 - accuracy: 0.5236
Epoch 4: val_accuracy improved from 0.54100 to 0.57590, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 1.5492 - accuracy: 0.5237 - val_loss: 1.3631 - val_accuracy: 0.5759
Epoch 5/100
781/782 [============================>.] - ETA: 0s - loss: 1.3926 - accuracy: 0.5713
Epoch 5: val_accuracy improved from 0.57590 to 0.59470, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 1.3926 - accuracy: 0.5713 - val_loss: 1.3161 - val_accuracy: 0.5947
Epoch 6/100
781/782 [============================>.] - ETA: 0s - loss: 1.2646 - accuracy: 0.6085
Epoch 6: val_accuracy improved from 0.59470 to 0.64010, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 1.2646 - accuracy: 0.6085 - val_loss: 1.1818 - val_accuracy: 0.6401
Epoch 7/100
781/782 [============================>.] - ETA: 0s - loss: 1.1619 - accuracy: 0.6405
Epoch 7: val_accuracy improved from 0.64010 to 0.64160, saving model to coarselabel2.h5
782/782 [==============================] - 33s 43ms/step - loss: 1.1619 - accuracy: 0.6405 - val_loss: 1.1665 - val_accuracy: 0.6416
Epoch 8/100
781/782 [============================>.] - ETA: 0s - loss: 1.0753 - accuracy: 0.6682
Epoch 8: val_accuracy improved from 0.64160 to 0.66600, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 1.0753 - accuracy: 0.6682 - val_loss: 1.0997 - val_accuracy: 0.6660
Epoch 9/100
781/782 [============================>.] - ETA: 0s - loss: 0.9998 - accuracy: 0.6918
Epoch 9: val_accuracy improved from 0.66600 to 0.67060, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 0.9998 - accuracy: 0.6918 - val_loss: 1.0988 - val_accuracy: 0.6706
Epoch 10/100
781/782 [============================>.] - ETA: 0s - loss: 0.9310 - accuracy: 0.7134
Epoch 10: val_accuracy did not improve from 0.67060
782/782 [==============================] - 33s 42ms/step - loss: 0.9310 - accuracy: 0.7134 - val_loss: 1.0920 - val_accuracy: 0.6705
Epoch 11/100
781/782 [============================>.] - ETA: 0s - loss: 0.8766 - accuracy: 0.7307
Epoch 11: val_accuracy improved from 0.67060 to 0.67990, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 0.8765 - accuracy: 0.7307 - val_loss: 1.1043 - val_accuracy: 0.6799
Epoch 12/100
782/782 [==============================] - ETA: 0s - loss: 0.8270 - accuracy: 0.7462
Epoch 12: val_accuracy improved from 0.67990 to 0.68630, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 0.8270 - accuracy: 0.7462 - val_loss: 1.0945 - val_accuracy: 0.6863
Epoch 13/100
781/782 [============================>.] - ETA: 0s - loss: 0.7796 - accuracy: 0.7612
Epoch 13: val_accuracy did not improve from 0.68630
782/782 [==============================] - 33s 42ms/step - loss: 0.7797 - accuracy: 0.7612 - val_loss: 1.0980 - val_accuracy: 0.6824
Epoch 14/100
781/782 [============================>.] - ETA: 0s - loss: 0.7389 - accuracy: 0.7731
Epoch 14: val_accuracy improved from 0.68630 to 0.69170, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 0.7391 - accuracy: 0.7730 - val_loss: 1.0788 - val_accuracy: 0.6917
Epoch 15/100
781/782 [============================>.] - ETA: 0s - loss: 0.7058 - accuracy: 0.7845
Epoch 15: val_accuracy improved from 0.69170 to 0.70200, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 0.7059 - accuracy: 0.7845 - val_loss: 1.0694 - val_accuracy: 0.7020
Epoch 16/100
781/782 [============================>.] - ETA: 0s - loss: 0.6765 - accuracy: 0.7941
Epoch 16: val_accuracy did not improve from 0.70200
782/782 [==============================] - 33s 42ms/step - loss: 0.6765 - accuracy: 0.7941 - val_loss: 1.0867 - val_accuracy: 0.6926
Epoch 17/100
781/782 [============================>.] - ETA: 0s - loss: 0.6494 - accuracy: 0.8025
Epoch 17: val_accuracy did not improve from 0.70200
782/782 [==============================] - 33s 42ms/step - loss: 0.6494 - accuracy: 0.8024 - val_loss: 1.1162 - val_accuracy: 0.6922
Epoch 18/100
781/782 [============================>.] - ETA: 0s - loss: 0.6210 - accuracy: 0.8108
Epoch 18: val_accuracy did not improve from 0.70200
782/782 [==============================] - 33s 42ms/step - loss: 0.6210 - accuracy: 0.8108 - val_loss: 1.1239 - val_accuracy: 0.6930
Epoch 19/100
781/782 [============================>.] - ETA: 0s - loss: 0.6024 - accuracy: 0.8160
Epoch 19: val_accuracy improved from 0.70200 to 0.70320, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 0.6025 - accuracy: 0.8160 - val_loss: 1.0813 - val_accuracy: 0.7032
Epoch 20/100
781/782 [============================>.] - ETA: 0s - loss: 0.5785 - accuracy: 0.8241
Epoch 20: val_accuracy did not improve from 0.70320
782/782 [==============================] - 33s 42ms/step - loss: 0.5785 - accuracy: 0.8241 - val_loss: 1.1036 - val_accuracy: 0.6973
Epoch 21/100
781/782 [============================>.] - ETA: 0s - loss: 0.5615 - accuracy: 0.8284
Epoch 21: val_accuracy did not improve from 0.70320
782/782 [==============================] - 33s 42ms/step - loss: 0.5616 - accuracy: 0.8284 - val_loss: 1.1654 - val_accuracy: 0.6912
Epoch 22/100
781/782 [============================>.] - ETA: 0s - loss: 0.5422 - accuracy: 0.8355
Epoch 22: val_accuracy did not improve from 0.70320
782/782 [==============================] - 33s 42ms/step - loss: 0.5423 - accuracy: 0.8355 - val_loss: 1.1335 - val_accuracy: 0.7024
Epoch 23/100
781/782 [============================>.] - ETA: 0s - loss: 0.5257 - accuracy: 0.8392
Epoch 23: val_accuracy did not improve from 0.70320
782/782 [==============================] - 33s 42ms/step - loss: 0.5257 - accuracy: 0.8392 - val_loss: 1.1437 - val_accuracy: 0.7006
Epoch 24/100
782/782 [==============================] - ETA: 0s - loss: 0.5119 - accuracy: 0.8454
Epoch 24: val_accuracy did not improve from 0.70320
782/782 [==============================] - 33s 42ms/step - loss: 0.5119 - accuracy: 0.8454 - val_loss: 1.1522 - val_accuracy: 0.6968
Epoch 25/100
782/782 [==============================] - ETA: 0s - loss: 0.4999 - accuracy: 0.8473
Epoch 25: val_accuracy did not improve from 0.70320
782/782 [==============================] - 33s 42ms/step - loss: 0.4999 - accuracy: 0.8473 - val_loss: 1.1788 - val_accuracy: 0.6936
Epoch 26/100
782/782 [==============================] - ETA: 0s - loss: 0.4846 - accuracy: 0.8524
Epoch 26: val_accuracy did not improve from 0.70320
782/782 [==============================] - 34s 43ms/step - loss: 0.4846 - accuracy: 0.8524 - val_loss: 1.1707 - val_accuracy: 0.6956
Epoch 27/100
781/782 [============================>.] - ETA: 0s - loss: 0.4711 - accuracy: 0.8566
Epoch 27: val_accuracy did not improve from 0.70320
782/782 [==============================] - 33s 42ms/step - loss: 0.4711 - accuracy: 0.8566 - val_loss: 1.1502 - val_accuracy: 0.7032
Epoch 28/100
781/782 [============================>.] - ETA: 0s - loss: 0.4635 - accuracy: 0.8592
Epoch 28: val_accuracy did not improve from 0.70320
782/782 [==============================] - 33s 42ms/step - loss: 0.4635 - accuracy: 0.8592 - val_loss: 1.1985 - val_accuracy: 0.6982
Epoch 29/100
782/782 [==============================] - ETA: 0s - loss: 0.4486 - accuracy: 0.8638
Epoch 29: val_accuracy did not improve from 0.70320
782/782 [==============================] - 33s 42ms/step - loss: 0.4486 - accuracy: 0.8638 - val_loss: 1.1824 - val_accuracy: 0.6984
Epoch 30/100
781/782 [============================>.] - ETA: 0s - loss: 0.4426 - accuracy: 0.8646
Epoch 30: val_accuracy did not improve from 0.70320
782/782 [==============================] - 33s 42ms/step - loss: 0.4427 - accuracy: 0.8646 - val_loss: 1.1852 - val_accuracy: 0.6987
Epoch 31/100
781/782 [============================>.] - ETA: 0s - loss: 0.4281 - accuracy: 0.8694
Epoch 31: val_accuracy did not improve from 0.70320
782/782 [==============================] - 33s 42ms/step - loss: 0.4281 - accuracy: 0.8694 - val_loss: 1.2137 - val_accuracy: 0.6996
Epoch 32/100
781/782 [============================>.] - ETA: 0s - loss: 0.4207 - accuracy: 0.8718
Epoch 32: val_accuracy did not improve from 0.70320
782/782 [==============================] - 33s 42ms/step - loss: 0.4207 - accuracy: 0.8718 - val_loss: 1.2043 - val_accuracy: 0.7019
Epoch 33/100
781/782 [============================>.] - ETA: 0s - loss: 0.4126 - accuracy: 0.8733
Epoch 33: val_accuracy did not improve from 0.70320
782/782 [==============================] - 33s 42ms/step - loss: 0.4127 - accuracy: 0.8733 - val_loss: 1.2252 - val_accuracy: 0.7002
Epoch 34/100
781/782 [============================>.] - ETA: 0s - loss: 0.4055 - accuracy: 0.8759
Epoch 34: val_accuracy did not improve from 0.70320
782/782 [==============================] - 33s 42ms/step - loss: 0.4055 - accuracy: 0.8759 - val_loss: 1.2262 - val_accuracy: 0.7000
Epoch 35/100
781/782 [============================>.] - ETA: 0s - loss: 0.3936 - accuracy: 0.8797
Epoch 35: val_accuracy improved from 0.70320 to 0.70470, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 0.3937 - accuracy: 0.8797 - val_loss: 1.2228 - val_accuracy: 0.7047
Epoch 35: early stopping
visualkeras.layered_view(model,legend=True).show() # display using your system viewer
model = Sequential()
# Convolutional Layer
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.3))
model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.3))
# Convolutional Layer
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.3))
# Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=256, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.3))
model.add(Flatten())
# model.add(Dropout(0.2))
model.add(Dense(128, activation='elu'))
model.add(Dropout(0.3))
model.add(Dense(20, activation='softmax',kernel_regularizer=tensorflow.keras.regularizers.L2(0.02)))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adamax',
metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=20, verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
mc = ModelCheckpoint('coarselabel2.h5', monitor='val_accuracy', mode='max', verbose=1, save_best_only=True)
h_callback = model.fit(X_train, y_train, epochs = 100,validation_data=(X_val, y_val),batch_size=256,callbacks=[early_stopping,mc])
# Plot train vs test loss during training
plot_loss(h_callback.history['loss'], h_callback.history['val_loss'])
plot_accuracy(h_callback.history['accuracy'], h_callback.history['val_accuracy'])
Model: "sequential_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_20 (Conv2D) (None, 32, 32, 32) 896
batch_normalization_20 (Bat (None, 32, 32, 32) 128
chNormalization)
conv2d_21 (Conv2D) (None, 32, 32, 32) 9248
batch_normalization_21 (Bat (None, 32, 32, 32) 128
chNormalization)
max_pooling2d_8 (MaxPooling (None, 16, 16, 32) 0
2D)
dropout_10 (Dropout) (None, 16, 16, 32) 0
conv2d_22 (Conv2D) (None, 16, 16, 64) 18496
batch_normalization_22 (Bat (None, 16, 16, 64) 256
chNormalization)
conv2d_23 (Conv2D) (None, 16, 16, 64) 36928
batch_normalization_23 (Bat (None, 16, 16, 64) 256
chNormalization)
max_pooling2d_9 (MaxPooling (None, 8, 8, 64) 0
2D)
dropout_11 (Dropout) (None, 8, 8, 64) 0
conv2d_24 (Conv2D) (None, 8, 8, 128) 73856
batch_normalization_24 (Bat (None, 8, 8, 128) 512
chNormalization)
conv2d_25 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_25 (Bat (None, 8, 8, 128) 512
chNormalization)
conv2d_26 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_26 (Bat (None, 8, 8, 128) 512
chNormalization)
conv2d_27 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_27 (Bat (None, 8, 8, 128) 512
chNormalization)
max_pooling2d_10 (MaxPoolin (None, 4, 4, 128) 0
g2D)
dropout_12 (Dropout) (None, 4, 4, 128) 0
conv2d_28 (Conv2D) (None, 4, 4, 256) 295168
batch_normalization_28 (Bat (None, 4, 4, 256) 1024
chNormalization)
conv2d_29 (Conv2D) (None, 4, 4, 256) 590080
batch_normalization_29 (Bat (None, 4, 4, 256) 1024
chNormalization)
max_pooling2d_11 (MaxPoolin (None, 2, 2, 256) 0
g2D)
dropout_13 (Dropout) (None, 2, 2, 256) 0
flatten_2 (Flatten) (None, 1024) 0
dense_4 (Dense) (None, 128) 131200
dropout_14 (Dropout) (None, 128) 0
dense_5 (Dense) (None, 20) 2580
=================================================================
Total params: 1,606,068
Trainable params: 1,603,636
Non-trainable params: 2,432
_________________________________________________________________
Epoch 1/100
781/782 [============================>.] - ETA: 0s - loss: 2.8009 - accuracy: 0.2570
Epoch 1: val_accuracy improved from -inf to 0.37090, saving model to coarselabel2.h5
782/782 [==============================] - 34s 42ms/step - loss: 2.8008 - accuracy: 0.2571 - val_loss: 2.2057 - val_accuracy: 0.3709
Epoch 2/100
781/782 [============================>.] - ETA: 0s - loss: 2.0278 - accuracy: 0.3996
Epoch 2: val_accuracy improved from 0.37090 to 0.47860, saving model to coarselabel2.h5
782/782 [==============================] - 32s 42ms/step - loss: 2.0277 - accuracy: 0.3996 - val_loss: 1.7298 - val_accuracy: 0.4786
Epoch 3/100
781/782 [============================>.] - ETA: 0s - loss: 1.7149 - accuracy: 0.4773
Epoch 3: val_accuracy improved from 0.47860 to 0.52740, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 1.7150 - accuracy: 0.4773 - val_loss: 1.5117 - val_accuracy: 0.5274
Epoch 4/100
781/782 [============================>.] - ETA: 0s - loss: 1.5131 - accuracy: 0.5344
Epoch 4: val_accuracy improved from 0.52740 to 0.58090, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 1.5131 - accuracy: 0.5344 - val_loss: 1.3515 - val_accuracy: 0.5809
Epoch 5/100
781/782 [============================>.] - ETA: 0s - loss: 1.3598 - accuracy: 0.5812
Epoch 5: val_accuracy improved from 0.58090 to 0.60880, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 1.3598 - accuracy: 0.5812 - val_loss: 1.2569 - val_accuracy: 0.6088
Epoch 6/100
781/782 [============================>.] - ETA: 0s - loss: 1.2413 - accuracy: 0.6168
Epoch 6: val_accuracy improved from 0.60880 to 0.64750, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 1.2413 - accuracy: 0.6167 - val_loss: 1.1361 - val_accuracy: 0.6475
Epoch 7/100
781/782 [============================>.] - ETA: 0s - loss: 1.1449 - accuracy: 0.6459
Epoch 7: val_accuracy improved from 0.64750 to 0.66590, saving model to coarselabel2.h5
782/782 [==============================] - 33s 42ms/step - loss: 1.1448 - accuracy: 0.6459 - val_loss: 1.0847 - val_accuracy: 0.6659
Epoch 8/100
781/782 [============================>.] - ETA: 0s - loss: 1.0569 - accuracy: 0.6725
Epoch 8: val_accuracy did not improve from 0.66590
782/782 [==============================] - 34s 44ms/step - loss: 1.0570 - accuracy: 0.6725 - val_loss: 1.1102 - val_accuracy: 0.6641
Epoch 9/100
781/782 [============================>.] - ETA: 0s - loss: 0.9824 - accuracy: 0.6968
Epoch 9: val_accuracy improved from 0.66590 to 0.67420, saving model to coarselabel2.h5
782/782 [==============================] - 34s 44ms/step - loss: 0.9824 - accuracy: 0.6968 - val_loss: 1.0894 - val_accuracy: 0.6742
Epoch 10/100
782/782 [==============================] - ETA: 0s - loss: 0.9168 - accuracy: 0.7178
Epoch 10: val_accuracy improved from 0.67420 to 0.67760, saving model to coarselabel2.h5
782/782 [==============================] - 34s 44ms/step - loss: 0.9168 - accuracy: 0.7178 - val_loss: 1.0706 - val_accuracy: 0.6776
Epoch 11/100
782/782 [==============================] - ETA: 0s - loss: 0.8675 - accuracy: 0.7331
Epoch 11: val_accuracy improved from 0.67760 to 0.69170, saving model to coarselabel2.h5
782/782 [==============================] - 33s 43ms/step - loss: 0.8675 - accuracy: 0.7331 - val_loss: 1.0263 - val_accuracy: 0.6917
Epoch 12/100
781/782 [============================>.] - ETA: 0s - loss: 0.8143 - accuracy: 0.7504
Epoch 12: val_accuracy did not improve from 0.69170
782/782 [==============================] - 33s 42ms/step - loss: 0.8143 - accuracy: 0.7504 - val_loss: 1.0653 - val_accuracy: 0.6892
Epoch 13/100
782/782 [==============================] - ETA: 0s - loss: 0.7705 - accuracy: 0.7636
Epoch 13: val_accuracy did not improve from 0.69170
782/782 [==============================] - 34s 43ms/step - loss: 0.7705 - accuracy: 0.7636 - val_loss: 1.0759 - val_accuracy: 0.6898
Epoch 14/100
781/782 [============================>.] - ETA: 0s - loss: 0.7342 - accuracy: 0.7750
Epoch 14: val_accuracy improved from 0.69170 to 0.69640, saving model to coarselabel2.h5
782/782 [==============================] - 33s 43ms/step - loss: 0.7342 - accuracy: 0.7750 - val_loss: 1.0752 - val_accuracy: 0.6964
Epoch 15/100
781/782 [============================>.] - ETA: 0s - loss: 0.6966 - accuracy: 0.7868
Epoch 15: val_accuracy did not improve from 0.69640
782/782 [==============================] - 31s 39ms/step - loss: 0.6967 - accuracy: 0.7868 - val_loss: 1.0871 - val_accuracy: 0.6908
Epoch 16/100
781/782 [============================>.] - ETA: 0s - loss: 0.6662 - accuracy: 0.7965
Epoch 16: val_accuracy improved from 0.69640 to 0.70260, saving model to coarselabel2.h5
782/782 [==============================] - 31s 39ms/step - loss: 0.6662 - accuracy: 0.7966 - val_loss: 1.0549 - val_accuracy: 0.7026
Epoch 17/100
781/782 [============================>.] - ETA: 0s - loss: 0.6396 - accuracy: 0.8052
Epoch 17: val_accuracy did not improve from 0.70260
782/782 [==============================] - 31s 39ms/step - loss: 0.6397 - accuracy: 0.8052 - val_loss: 1.0742 - val_accuracy: 0.7026
Epoch 18/100
781/782 [============================>.] - ETA: 0s - loss: 0.6146 - accuracy: 0.8123
Epoch 18: val_accuracy did not improve from 0.70260
782/782 [==============================] - 32s 41ms/step - loss: 0.6146 - accuracy: 0.8124 - val_loss: 1.1094 - val_accuracy: 0.6953
Epoch 19/100
782/782 [==============================] - ETA: 0s - loss: 0.5951 - accuracy: 0.8189
Epoch 19: val_accuracy improved from 0.70260 to 0.70840, saving model to coarselabel2.h5
782/782 [==============================] - 33s 43ms/step - loss: 0.5951 - accuracy: 0.8189 - val_loss: 1.0802 - val_accuracy: 0.7084
Epoch 20/100
782/782 [==============================] - ETA: 0s - loss: 0.5761 - accuracy: 0.8242
Epoch 20: val_accuracy did not improve from 0.70840
782/782 [==============================] - 31s 40ms/step - loss: 0.5761 - accuracy: 0.8242 - val_loss: 1.0698 - val_accuracy: 0.7049
Epoch 21/100
781/782 [============================>.] - ETA: 0s - loss: 0.5566 - accuracy: 0.8303
Epoch 21: val_accuracy did not improve from 0.70840
782/782 [==============================] - 33s 43ms/step - loss: 0.5566 - accuracy: 0.8303 - val_loss: 1.1265 - val_accuracy: 0.7010
Epoch 22/100
781/782 [============================>.] - ETA: 0s - loss: 0.5349 - accuracy: 0.8384
Epoch 22: val_accuracy did not improve from 0.70840
782/782 [==============================] - 30s 39ms/step - loss: 0.5349 - accuracy: 0.8384 - val_loss: 1.1417 - val_accuracy: 0.6930
Epoch 23/100
781/782 [============================>.] - ETA: 0s - loss: 0.5195 - accuracy: 0.8420
Epoch 23: val_accuracy did not improve from 0.70840
782/782 [==============================] - 30s 38ms/step - loss: 0.5196 - accuracy: 0.8420 - val_loss: 1.1217 - val_accuracy: 0.7044
Epoch 24/100
781/782 [============================>.] - ETA: 0s - loss: 0.5016 - accuracy: 0.8476
Epoch 24: val_accuracy did not improve from 0.70840
782/782 [==============================] - 30s 38ms/step - loss: 0.5016 - accuracy: 0.8476 - val_loss: 1.1521 - val_accuracy: 0.7020
Epoch 25/100
781/782 [============================>.] - ETA: 0s - loss: 0.4928 - accuracy: 0.8499
Epoch 25: val_accuracy did not improve from 0.70840
782/782 [==============================] - 30s 38ms/step - loss: 0.4928 - accuracy: 0.8499 - val_loss: 1.1512 - val_accuracy: 0.7074
Epoch 26/100
781/782 [============================>.] - ETA: 0s - loss: 0.4817 - accuracy: 0.8536
Epoch 26: val_accuracy did not improve from 0.70840
782/782 [==============================] - 33s 42ms/step - loss: 0.4817 - accuracy: 0.8536 - val_loss: 1.1560 - val_accuracy: 0.7035
Epoch 27/100
781/782 [============================>.] - ETA: 0s - loss: 0.4681 - accuracy: 0.8578
Epoch 27: val_accuracy did not improve from 0.70840
782/782 [==============================] - 33s 42ms/step - loss: 0.4681 - accuracy: 0.8578 - val_loss: 1.1443 - val_accuracy: 0.7051
Epoch 28/100
781/782 [============================>.] - ETA: 0s - loss: 0.4590 - accuracy: 0.8598
Epoch 28: val_accuracy did not improve from 0.70840
782/782 [==============================] - 33s 42ms/step - loss: 0.4590 - accuracy: 0.8598 - val_loss: 1.1487 - val_accuracy: 0.7083
Epoch 29/100
781/782 [============================>.] - ETA: 0s - loss: 0.4451 - accuracy: 0.8647
Epoch 29: val_accuracy did not improve from 0.70840
782/782 [==============================] - 33s 42ms/step - loss: 0.4452 - accuracy: 0.8647 - val_loss: 1.1682 - val_accuracy: 0.6991
Epoch 30/100
782/782 [==============================] - ETA: 0s - loss: 0.4358 - accuracy: 0.8672
Epoch 30: val_accuracy did not improve from 0.70840
782/782 [==============================] - 33s 42ms/step - loss: 0.4358 - accuracy: 0.8672 - val_loss: 1.1818 - val_accuracy: 0.7061
Epoch 31/100
781/782 [============================>.] - ETA: 0s - loss: 0.4288 - accuracy: 0.8687
Epoch 31: val_accuracy did not improve from 0.70840
782/782 [==============================] - 33s 42ms/step - loss: 0.4287 - accuracy: 0.8687 - val_loss: 1.1674 - val_accuracy: 0.7046
Epoch 31: early stopping
del model
gc.collect()
26397
model = Sequential()
# Convolutional Layer
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.4))
model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.4))
# Convolutional Layer
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.4))
# Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(filters=256, kernel_size=(3, 3), input_shape=(32, 32, 3), activation='elu', padding='same'))
model.add(BatchNormalization())
# Pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout layers
model.add(Dropout(0.4))
model.add(Flatten())
# model.add(Dropout(0.2))
model.add(Dense(128, activation='elu'))
model.add(Dropout(0.4))
model.add(Dense(20, activation='softmax',kernel_regularizer=tensorflow.keras.regularizers.L2(0.02)))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adamax',
metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
mc = ModelCheckpoint('coarselabel3.h5', monitor='val_accuracy', mode='max', verbose=1, save_best_only=True)
h_callback = model.fit(X_train, y_train, epochs = 100,validation_data=(X_val, y_val),batch_size=256,callbacks=[early_stopping,mc])
# Plot train vs test loss during training
plot_loss(h_callback.history['loss'], h_callback.history['val_loss'])
plot_accuracy(h_callback.history['accuracy'], h_callback.history['val_accuracy'])
Model: "sequential_4"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_38 (Conv2D) (None, 32, 32, 32) 896
batch_normalization_38 (Bat (None, 32, 32, 32) 128
chNormalization)
conv2d_39 (Conv2D) (None, 32, 32, 32) 9248
batch_normalization_39 (Bat (None, 32, 32, 32) 128
chNormalization)
max_pooling2d_16 (MaxPoolin (None, 16, 16, 32) 0
g2D)
dropout_20 (Dropout) (None, 16, 16, 32) 0
conv2d_40 (Conv2D) (None, 16, 16, 64) 18496
batch_normalization_40 (Bat (None, 16, 16, 64) 256
chNormalization)
conv2d_41 (Conv2D) (None, 16, 16, 64) 36928
batch_normalization_41 (Bat (None, 16, 16, 64) 256
chNormalization)
max_pooling2d_17 (MaxPoolin (None, 8, 8, 64) 0
g2D)
dropout_21 (Dropout) (None, 8, 8, 64) 0
conv2d_42 (Conv2D) (None, 8, 8, 128) 73856
batch_normalization_42 (Bat (None, 8, 8, 128) 512
chNormalization)
conv2d_43 (Conv2D) (None, 8, 8, 128) 147584
batch_normalization_43 (Bat (None, 8, 8, 128) 512
chNormalization)
max_pooling2d_18 (MaxPoolin (None, 4, 4, 128) 0
g2D)
dropout_22 (Dropout) (None, 4, 4, 128) 0
conv2d_44 (Conv2D) (None, 4, 4, 256) 295168
batch_normalization_44 (Bat (None, 4, 4, 256) 1024
chNormalization)
conv2d_45 (Conv2D) (None, 4, 4, 256) 590080
batch_normalization_45 (Bat (None, 4, 4, 256) 1024
chNormalization)
max_pooling2d_19 (MaxPoolin (None, 2, 2, 256) 0
g2D)
dropout_23 (Dropout) (None, 2, 2, 256) 0
flatten_4 (Flatten) (None, 1024) 0
dense_8 (Dense) (None, 128) 131200
dropout_24 (Dropout) (None, 128) 0
dense_9 (Dense) (None, 20) 2580
=================================================================
Total params: 1,309,876
Trainable params: 1,307,956
Non-trainable params: 1,920
_________________________________________________________________
Epoch 1/100
781/782 [============================>.] - ETA: 0s - loss: 3.0233 - accuracy: 0.2033
Epoch 1: val_accuracy improved from -inf to 0.29620, saving model to coarselabel2.h5
782/782 [==============================] - 30s 36ms/step - loss: 3.0232 - accuracy: 0.2033 - val_loss: 2.4765 - val_accuracy: 0.2962
Epoch 2/100
781/782 [============================>.] - ETA: 0s - loss: 2.2505 - accuracy: 0.3300
Epoch 2: val_accuracy improved from 0.29620 to 0.40910, saving model to coarselabel2.h5
782/782 [==============================] - 28s 36ms/step - loss: 2.2504 - accuracy: 0.3300 - val_loss: 1.9564 - val_accuracy: 0.4091
Epoch 3/100
781/782 [============================>.] - ETA: 0s - loss: 1.9412 - accuracy: 0.4085
Epoch 3: val_accuracy improved from 0.40910 to 0.47880, saving model to coarselabel2.h5
782/782 [==============================] - 28s 36ms/step - loss: 1.9411 - accuracy: 0.4085 - val_loss: 1.6898 - val_accuracy: 0.4788
Epoch 4/100
781/782 [============================>.] - ETA: 0s - loss: 1.7483 - accuracy: 0.4611
Epoch 4: val_accuracy improved from 0.47880 to 0.52750, saving model to coarselabel2.h5
782/782 [==============================] - 28s 36ms/step - loss: 1.7483 - accuracy: 0.4610 - val_loss: 1.5120 - val_accuracy: 0.5275
Epoch 5/100
781/782 [============================>.] - ETA: 0s - loss: 1.6057 - accuracy: 0.5030
Epoch 5: val_accuracy improved from 0.52750 to 0.56690, saving model to coarselabel2.h5
782/782 [==============================] - 28s 36ms/step - loss: 1.6058 - accuracy: 0.5029 - val_loss: 1.3668 - val_accuracy: 0.5669
Epoch 6/100
781/782 [============================>.] - ETA: 0s - loss: 1.4951 - accuracy: 0.5371
Epoch 6: val_accuracy improved from 0.56690 to 0.59320, saving model to coarselabel2.h5
782/782 [==============================] - 28s 36ms/step - loss: 1.4950 - accuracy: 0.5371 - val_loss: 1.2893 - val_accuracy: 0.5932
Epoch 7/100
781/782 [============================>.] - ETA: 0s - loss: 1.4123 - accuracy: 0.5630
Epoch 7: val_accuracy improved from 0.59320 to 0.61480, saving model to coarselabel2.h5
782/782 [==============================] - 28s 36ms/step - loss: 1.4122 - accuracy: 0.5630 - val_loss: 1.2107 - val_accuracy: 0.6148
Epoch 8/100
781/782 [============================>.] - ETA: 0s - loss: 1.3389 - accuracy: 0.5859
Epoch 8: val_accuracy improved from 0.61480 to 0.63710, saving model to coarselabel2.h5
782/782 [==============================] - 28s 36ms/step - loss: 1.3388 - accuracy: 0.5859 - val_loss: 1.1517 - val_accuracy: 0.6371
Epoch 9/100
781/782 [============================>.] - ETA: 0s - loss: 1.2763 - accuracy: 0.6045
Epoch 9: val_accuracy did not improve from 0.63710
782/782 [==============================] - 29s 37ms/step - loss: 1.2762 - accuracy: 0.6045 - val_loss: 1.1786 - val_accuracy: 0.6322
Epoch 10/100
781/782 [============================>.] - ETA: 0s - loss: 1.2194 - accuracy: 0.6219
Epoch 10: val_accuracy improved from 0.63710 to 0.66280, saving model to coarselabel2.h5
782/782 [==============================] - 28s 36ms/step - loss: 1.2193 - accuracy: 0.6219 - val_loss: 1.0921 - val_accuracy: 0.6628
Epoch 11/100
781/782 [============================>.] - ETA: 0s - loss: 1.1712 - accuracy: 0.6373
Epoch 11: val_accuracy did not improve from 0.66280
782/782 [==============================] - 28s 36ms/step - loss: 1.1712 - accuracy: 0.6373 - val_loss: 1.0917 - val_accuracy: 0.6626
Epoch 12/100
781/782 [============================>.] - ETA: 0s - loss: 1.1362 - accuracy: 0.6470
Epoch 12: val_accuracy improved from 0.66280 to 0.68040, saving model to coarselabel2.h5
782/782 [==============================] - 28s 36ms/step - loss: 1.1362 - accuracy: 0.6470 - val_loss: 1.0570 - val_accuracy: 0.6804
Epoch 13/100
781/782 [============================>.] - ETA: 0s - loss: 1.0975 - accuracy: 0.6604
Epoch 13: val_accuracy did not improve from 0.68040
782/782 [==============================] - 28s 36ms/step - loss: 1.0975 - accuracy: 0.6604 - val_loss: 1.0535 - val_accuracy: 0.6742
Epoch 14/100
781/782 [============================>.] - ETA: 0s - loss: 1.0630 - accuracy: 0.6705
Epoch 14: val_accuracy improved from 0.68040 to 0.68230, saving model to coarselabel2.h5
782/782 [==============================] - 28s 36ms/step - loss: 1.0630 - accuracy: 0.6705 - val_loss: 1.0441 - val_accuracy: 0.6823
Epoch 15/100
781/782 [============================>.] - ETA: 0s - loss: 1.0365 - accuracy: 0.6785
Epoch 15: val_accuracy improved from 0.68230 to 0.68250, saving model to coarselabel2.h5
782/782 [==============================] - 28s 36ms/step - loss: 1.0364 - accuracy: 0.6785 - val_loss: 1.0500 - val_accuracy: 0.6825
Epoch 16/100
781/782 [============================>.] - ETA: 0s - loss: 1.0047 - accuracy: 0.6888
Epoch 16: val_accuracy improved from 0.68250 to 0.68360, saving model to coarselabel2.h5
782/782 [==============================] - 28s 36ms/step - loss: 1.0048 - accuracy: 0.6888 - val_loss: 1.0523 - val_accuracy: 0.6836
Epoch 17/100
781/782 [============================>.] - ETA: 0s - loss: 0.9824 - accuracy: 0.6960
Epoch 17: val_accuracy improved from 0.68360 to 0.69020, saving model to coarselabel2.h5
782/782 [==============================] - 28s 36ms/step - loss: 0.9824 - accuracy: 0.6960 - val_loss: 1.0228 - val_accuracy: 0.6902
Epoch 18/100
781/782 [============================>.] - ETA: 0s - loss: 0.9558 - accuracy: 0.7039
Epoch 18: val_accuracy did not improve from 0.69020
782/782 [==============================] - 29s 37ms/step - loss: 0.9558 - accuracy: 0.7038 - val_loss: 1.0137 - val_accuracy: 0.6901
Epoch 19/100
781/782 [============================>.] - ETA: 0s - loss: 0.9361 - accuracy: 0.7111
Epoch 19: val_accuracy did not improve from 0.69020
782/782 [==============================] - 29s 37ms/step - loss: 0.9360 - accuracy: 0.7111 - val_loss: 1.0526 - val_accuracy: 0.6838
Epoch 20/100
781/782 [============================>.] - ETA: 0s - loss: 0.9153 - accuracy: 0.7164
Epoch 20: val_accuracy did not improve from 0.69020
782/782 [==============================] - 27s 34ms/step - loss: 0.9153 - accuracy: 0.7164 - val_loss: 1.0415 - val_accuracy: 0.6893
Epoch 21/100
781/782 [============================>.] - ETA: 0s - loss: 0.8957 - accuracy: 0.7226
Epoch 21: val_accuracy improved from 0.69020 to 0.69160, saving model to coarselabel2.h5
782/782 [==============================] - 26s 34ms/step - loss: 0.8957 - accuracy: 0.7226 - val_loss: 1.0340 - val_accuracy: 0.6916
Epoch 22/100
781/782 [============================>.] - ETA: 0s - loss: 0.8795 - accuracy: 0.7287
Epoch 22: val_accuracy improved from 0.69160 to 0.69790, saving model to coarselabel2.h5
782/782 [==============================] - 28s 36ms/step - loss: 0.8794 - accuracy: 0.7287 - val_loss: 0.9971 - val_accuracy: 0.6979
Epoch 23/100
781/782 [============================>.] - ETA: 0s - loss: 0.8645 - accuracy: 0.7323
Epoch 23: val_accuracy improved from 0.69790 to 0.70750, saving model to coarselabel2.h5
782/782 [==============================] - 28s 36ms/step - loss: 0.8645 - accuracy: 0.7323 - val_loss: 0.9897 - val_accuracy: 0.7075
Epoch 24/100
781/782 [============================>.] - ETA: 0s - loss: 0.8512 - accuracy: 0.7370
Epoch 24: val_accuracy did not improve from 0.70750
782/782 [==============================] - 28s 36ms/step - loss: 0.8511 - accuracy: 0.7370 - val_loss: 1.0523 - val_accuracy: 0.6939
Epoch 25/100
781/782 [============================>.] - ETA: 0s - loss: 0.8387 - accuracy: 0.7423
Epoch 25: val_accuracy did not improve from 0.70750
782/782 [==============================] - 28s 36ms/step - loss: 0.8387 - accuracy: 0.7423 - val_loss: 1.0027 - val_accuracy: 0.7033
Epoch 26/100
781/782 [============================>.] - ETA: 0s - loss: 0.8203 - accuracy: 0.7466
Epoch 26: val_accuracy did not improve from 0.70750
782/782 [==============================] - 28s 36ms/step - loss: 0.8203 - accuracy: 0.7466 - val_loss: 1.0163 - val_accuracy: 0.7033
Epoch 27/100
782/782 [==============================] - ETA: 0s - loss: 0.8079 - accuracy: 0.7503
Epoch 27: val_accuracy did not improve from 0.70750
782/782 [==============================] - 29s 37ms/step - loss: 0.8079 - accuracy: 0.7503 - val_loss: 1.0117 - val_accuracy: 0.7073
Epoch 28/100
781/782 [============================>.] - ETA: 0s - loss: 0.7946 - accuracy: 0.7553
Epoch 28: val_accuracy did not improve from 0.70750
782/782 [==============================] - 32s 41ms/step - loss: 0.7947 - accuracy: 0.7553 - val_loss: 1.0480 - val_accuracy: 0.6952
Epoch 29/100
781/782 [============================>.] - ETA: 0s - loss: 0.7894 - accuracy: 0.7560
Epoch 29: val_accuracy improved from 0.70750 to 0.71030, saving model to coarselabel2.h5
782/782 [==============================] - 33s 43ms/step - loss: 0.7894 - accuracy: 0.7560 - val_loss: 0.9976 - val_accuracy: 0.7103
Epoch 30/100
781/782 [============================>.] - ETA: 0s - loss: 0.7780 - accuracy: 0.7593
Epoch 30: val_accuracy did not improve from 0.71030
782/782 [==============================] - 28s 36ms/step - loss: 0.7780 - accuracy: 0.7593 - val_loss: 1.0211 - val_accuracy: 0.7050
Epoch 31/100
781/782 [============================>.] - ETA: 0s - loss: 0.7641 - accuracy: 0.7638
Epoch 31: val_accuracy did not improve from 0.71030
782/782 [==============================] - 29s 37ms/step - loss: 0.7642 - accuracy: 0.7638 - val_loss: 1.0408 - val_accuracy: 0.7010
Epoch 32/100
781/782 [============================>.] - ETA: 0s - loss: 0.7574 - accuracy: 0.7673
Epoch 32: val_accuracy improved from 0.71030 to 0.71500, saving model to coarselabel2.h5
782/782 [==============================] - 29s 36ms/step - loss: 0.7574 - accuracy: 0.7673 - val_loss: 0.9939 - val_accuracy: 0.7150
Epoch 33/100
781/782 [============================>.] - ETA: 0s - loss: 0.7492 - accuracy: 0.7691
Epoch 33: val_accuracy did not improve from 0.71500
782/782 [==============================] - 29s 37ms/step - loss: 0.7492 - accuracy: 0.7691 - val_loss: 1.0193 - val_accuracy: 0.7104
Epoch 33: early stopping
# load coarse label model
model = load_model('coarselabel2.h5')
loss,acc = model.evaluate(X_test,y_test)
print("Coarse model, accuracy: {:5.2f}%".format(100*acc))
print("Coarse model, loss:", loss)
# plot confusion matrix
y_pred = model.predict(X_test)
pred=np.argmax(y_pred,axis=1)
classifation_matrix=confusion_matrix(y_test_label,pred)
plt.figure(figsize=(10,10))
sns.heatmap(classifation_matrix,annot=True,fmt='d')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
313/313 [==============================] - 2s 6ms/step - loss: 0.9536 - accuracy: 0.7249 Coarse model, accuracy: 72.49% Coarse model, loss: 0.9535974860191345 313/313 [==============================] - 2s 5ms/step